-rw-r--r--  Documentation/devicetree/bindings/arm/msm/diag_mhi.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/msm.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/cnss/cnss-wlan.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/cnss/icnss.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/drm/msm/mdp.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/media/video/msm-cci.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.txt | 201
-rw-r--r--  Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom-audio-dev.txt | 7
-rw-r--r--  Documentation/vm/page_owner.txt | 9
-rw-r--r--  Makefile | 14
-rw-r--r--  arch/arm/boot/dts/bcm5301x.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/Makefile | 5
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi | 75
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pm660l.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/msm-rdbg.dtsi | 33
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mtp.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-regulator.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi | 40
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-sde.dtsi | 41
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-v2.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts | 4
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996pro.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi | 39
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts | 24
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts | 24
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts | 24
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-9x55.dtsi | 25
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi | 56
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-cdp.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-mtp.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi | 80
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-qrd.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts | 9
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630.dtsi | 21
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts | 23
-rw-r--r--  arch/arm/mm/mmu.c | 8
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 6
-rw-r--r--  arch/arm64/kernel/perf_trace_counters.c | 3
-rw-r--r--  arch/mips/ath79/common.c | 16
-rw-r--r--  arch/mips/kernel/entry.S | 3
-rw-r--r--  arch/mips/kernel/pm-cps.c | 9
-rw-r--r--  arch/mips/kernel/traps.c | 2
-rw-r--r--  arch/mips/ralink/mt7620.c | 84
-rw-r--r--  arch/mips/ralink/rt288x.c | 2
-rw-r--r--  arch/powerpc/kernel/eeh.c | 10
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 11
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 21
-rw-r--r--  arch/powerpc/mm/slb_low.S | 10
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 4
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 4
-rw-r--r--  arch/x86/kvm/emulate.c | 16
-rw-r--r--  arch/x86/kvm/pmu_intel.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 20
-rw-r--r--  arch/x86/mm/mpx.c | 12
-rw-r--r--  arch/x86/mm/tlb.c | 4
-rw-r--r--  drivers/char/adsprpc.c | 30
-rw-r--r--  drivers/char/diag/diag_masks.c | 66
-rw-r--r--  drivers/char/diag/diag_memorydevice.c | 31
-rw-r--r--  drivers/char/diag/diag_mux.c | 7
-rw-r--r--  drivers/char/diag/diagchar.h | 1
-rw-r--r--  drivers/char/diag/diagchar_core.c | 116
-rw-r--r--  drivers/char/diag/diagfwd_cntl.c | 1
-rw-r--r--  drivers/char/diag/diagfwd_glink.c | 60
-rw-r--r--  drivers/char/diag/diagfwd_mhi.c | 32
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.c | 82
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.h | 3
-rw-r--r--  drivers/char/rdbg.c | 18
-rw-r--r--  drivers/char/virtio_console.c | 2
-rw-r--r--  drivers/clk/msm/clock-local2.c | 4
-rw-r--r--  drivers/clk/msm/mdss/mdss-dsi-pll-8998.c | 399
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c | 1
-rw-r--r--  drivers/crypto/msm/ice.c | 29
-rw-r--r--  drivers/crypto/msm/ota_crypto.c | 4
-rw-r--r--  drivers/crypto/msm/qce50.c | 16
-rw-r--r--  drivers/crypto/msm/qcedev.c | 4
-rw-r--r--  drivers/crypto/msm/qcrypto.c | 22
-rw-r--r--  drivers/esoc/esoc-mdm-4x.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 264
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 7
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 59
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 96
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 19
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 123
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c | 129
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h | 19
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 101
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_smmu.c | 24
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 31
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.h | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 243
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder.c | 132
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 54
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c | 111
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 10
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 3
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_cdm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_mdss.h | 8
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 88
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c | 41
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.c | 636
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.h | 124
-rw-r--r--  drivers/gpu/drm/msm/sde_edid_parser.c | 33
-rw-r--r--  drivers/gpu/drm/msm/sde_edid_parser.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 1
-rw-r--r--  drivers/gpu/msm/kgsl.c | 8
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.c | 28
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 9
-rw-r--r--  drivers/gpu/msm/kgsl_pwrscale.c | 20
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 9
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-remote-etm.c | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 36
-rw-r--r--  drivers/i2c/busses/i2c-msm-v2.c | 19
-rw-r--r--  drivers/iio/adc/qcom-rradc.c | 4
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/iommu.c | 37
-rw-r--r--  drivers/md/dm-android-verity.c | 2
-rw-r--r--  drivers/media/platform/msm/ais/ispif/msm_ispif.c | 5
-rw-r--r--  drivers/media/platform/msm/ais/msm.c | 3
-rw-r--r--  drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c | 21
-rw-r--r--  drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/flash/msm_flash.c | 58
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ois/msm_ois.c | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c | 40
-rw-r--r--  drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.h | 1
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 9
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 64
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c | 35
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm.c | 11
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c | 85
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c | 4
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c | 4
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c | 46
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c | 25
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c | 3
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vdec.c | 48
-rw-r--r--  drivers/media/platform/msm/vidc/msm_venc.c | 2
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc.c | 16
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_common.c | 57
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_common.h | 6
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_resources.h | 1
-rw-r--r--  drivers/media/platform/msm/vidc/venus_hfi.c | 41
-rw-r--r--  drivers/misc/qseecom.c | 132
-rw-r--r--  drivers/mmc/core/core.c | 6
-rw-r--r--  drivers/mmc/core/mmc.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 21
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.h | 1
-rw-r--r--  drivers/mtd/bcm47xxpart.c | 42
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 7
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 13
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 7
-rw-r--r--  drivers/net/ethernet/korina.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 24
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 10
-rw-r--r--  drivers/net/phy/marvell.c | 2
-rw-r--r--  drivers/net/phy/mdio-bcm-iproc.c | 6
-rw-r--r--  drivers/net/virtio_net.c | 10
-rw-r--r--  drivers/net/vxlan.c | 2
-rw-r--r--  drivers/net/wireless/Kconfig | 9
-rw-r--r--  drivers/net/wireless/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 107
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-ops.h | 12
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 7
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/sysfs.c | 3
-rw-r--r--  drivers/net/wireless/cnss/Kconfig | 5
-rw-r--r--  drivers/net/wireless/cnss2/Kconfig | 17
-rw-r--r--  drivers/net/wireless/cnss2/Makefile | 9
-rw-r--r--  drivers/net/wireless/cnss2/debug.c | 174
-rw-r--r--  drivers/net/wireless/cnss2/debug.h | 73
-rw-r--r--  drivers/net/wireless/cnss2/main.c | 2360
-rw-r--r--  drivers/net/wireless/cnss2/main.h | 225
-rw-r--r--  drivers/net/wireless/cnss2/pci.c | 1612
-rw-r--r--  drivers/net/wireless/cnss2/pci.h | 141
-rw-r--r--  drivers/net/wireless/cnss2/power.c | 386
-rw-r--r--  drivers/net/wireless/cnss2/qmi.c | 1037
-rw-r--r--  drivers/net/wireless/cnss2/qmi.h | 41
-rw-r--r--  drivers/net/wireless/cnss2/utils.c | 129
-rw-r--r--  drivers/net/wireless/cnss2/wlan_firmware_service_v01.c | 2221
-rw-r--r--  drivers/net/wireless/cnss2/wlan_firmware_service_v01.h | 657
-rw-r--r--  drivers/net/wireless/cnss_utils/cnss_utils.c | 2
-rw-r--r--  drivers/net/wireless/wcnss/wcnss_wlan.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/nvme/host/nvme.h | 7
-rw-r--r--  drivers/nvme/host/pci.c | 5
-rw-r--r--  drivers/of/fdt.c | 9
-rw-r--r--  drivers/platform/msm/Kconfig | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_common_i.h | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 30
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_flt.c | 64
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 91
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 30
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_intf.c | 20
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_nat.c | 70
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 87
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 86
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 68
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 15
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 85
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 89
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 29
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_intf.c | 28
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_nat.c | 74
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 121
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 34
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 16
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h | 3
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c | 113
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 1
-rw-r--r--  drivers/power/qcom/msm-core.c | 4
-rw-r--r--  drivers/power/reset/msm-poweroff.c | 1
-rw-r--r--  drivers/power/supply/qcom/Makefile | 2
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 9
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 260
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 615
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 15
-rw-r--r--  drivers/power/supply/qcom/smb138x-charger.c | 7
-rw-r--r--  drivers/power/supply/qcom/step-chg-jeita.c | 272
-rw-r--r--  drivers/power/supply/qcom/step-chg-jeita.h | 17
-rw-r--r--  drivers/pwm/pwm-qpnp.c | 46
-rw-r--r--  drivers/regulator/core.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 14
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r--  drivers/scsi/virtio_scsi.c | 11
-rw-r--r--  drivers/soc/Makefile | 1
-rw-r--r--  drivers/soc/qcom/Kconfig | 4
-rw-r--r--  drivers/soc/qcom/Makefile | 1
-rw-r--r--  drivers/soc/qcom/icnss.c | 206
-rw-r--r--  drivers/soc/qcom/ipc_router_mhi_xprt.c | 94
-rw-r--r--  drivers/soc/qcom/scm_qcpe.c | 1137
-rw-r--r--  drivers/soc/qcom/service-notifier.c | 58
-rw-r--r--  drivers/soc/qcom/spcom.c | 54
-rw-r--r--  drivers/soc/qcom/watchdog_v2.c | 11
-rw-r--r--  drivers/spi/spi-davinci.c | 4
-rw-r--r--  drivers/staging/android/sync_debug.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 12
-rw-r--r--  drivers/target/target_core_internal.h | 2
-rw-r--r--  drivers/target/target_core_tmr.c | 16
-rw-r--r--  drivers/target/target_core_transport.c | 9
-rw-r--r--  drivers/thermal/msm_thermal.c | 2
-rw-r--r--  drivers/tty/serial/msm_serial_hs.c | 2
-rw-r--r--  drivers/usb/dwc3/core.h | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 128
-rw-r--r--  drivers/usb/dwc3/gadget.c | 50
-rw-r--r--  drivers/usb/gadget/composite.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_accessory.c | 31
-rw-r--r--  drivers/usb/gadget/function/f_qdss.c | 140
-rw-r--r--  drivers/usb/gadget/function/f_qdss.h | 4
-rw-r--r--  drivers/usb/pd/policy_engine.c | 23
-rw-r--r--  drivers/usb/phy/phy-msm-qusb-v2.c | 15
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 11
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 4
-rw-r--r--  drivers/video/fbdev/msm/mdp3_ppp.c | 7
-rw-r--r--  drivers/video/fbdev/msm/mdss.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_dba_utils.c | 3
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 35
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 10
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c | 12
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.c | 33
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.h | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 14
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 25
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_debug.c | 15
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_video.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c | 5
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c | 34
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c | 34
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp_v3.c | 11
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_splash_logo.c | 5
-rw-r--r--  drivers/video/fbdev/msm/mdss_rotator.c | 12
-rw-r--r--  drivers/video/fbdev/msm/msm_mdss_io_8974.c | 3
-rw-r--r--  drivers/watchdog/bcm_kona_wdt.c | 3
-rw-r--r--  drivers/xen/swiotlb-xen.c | 5
-rw-r--r--  fs/Kconfig | 5
-rw-r--r--  fs/autofs4/dev-ioctl.c | 2
-rw-r--r--  fs/binfmt_elf.c | 1
-rw-r--r--  fs/btrfs/inode.c | 13
-rw-r--r--  fs/cifs/smb1ops.c | 9
-rw-r--r--  fs/cifs/smb2ops.c | 4
-rw-r--r--  fs/coredump.c | 18
-rw-r--r--  fs/dcache.c | 5
-rw-r--r--  fs/exec.c | 28
-rw-r--r--  fs/file_table.c | 137
-rw-r--r--  fs/internal.h | 26
-rw-r--r--  fs/namei.c | 2
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 8
-rw-r--r--  fs/squashfs/block.c | 15
-rw-r--r--  include/dt-bindings/clock/msm-clocks-8998.h | 10
-rw-r--r--  include/linux/coredump.h | 1
-rw-r--r--  include/linux/fs.h | 4
-rw-r--r--  include/linux/list_lru.h | 1
-rw-r--r--  include/linux/migrate.h | 6
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/mmzone.h | 3
-rw-r--r--  include/linux/page_ext.h | 5
-rw-r--r--  include/linux/page_idle.h | 43
-rw-r--r--  include/linux/page_owner.h | 58
-rw-r--r--  include/linux/stackdepot.h | 32
-rw-r--r--  include/linux/timekeeper_internal.h | 1
-rw-r--r--  include/linux/usb/usb_qdss.h | 5
-rw-r--r--  include/net/cnss2.h | 192
-rw-r--r--  include/net/xfrm.h | 10
-rw-r--r--  include/soc/qcom/qseecomi.h | 5
-rw-r--r--  include/soc/qcom/scm.h | 4
-rw-r--r--  include/sound/apr_audio-v2.h | 156
-rw-r--r--  include/sound/q6adm-v2.h | 18
-rw-r--r--  include/sound/q6asm-v2.h | 8
-rw-r--r--  include/uapi/drm/msm_drm.h | 15
-rw-r--r--  include/uapi/linux/msm_ipa.h | 199
-rw-r--r--  include/uapi/media/msm_camsensor_sdk.h | 2
-rw-r--r--  include/uapi/sound/devdep_params.h | 12
-rw-r--r--  kernel/cpu.c | 19
-rw-r--r--  kernel/irq/proc.c | 5
-rw-r--r--  kernel/locking/rwsem-xadd.c | 35
-rw-r--r--  kernel/panic.c | 2
-rw-r--r--  kernel/sched/loadavg.c | 4
-rw-r--r--  kernel/signal.c | 20
-rw-r--r--  kernel/time/timekeeping.c | 47
-rw-r--r--  lib/Kconfig | 4
-rw-r--r--  lib/Kconfig.debug | 1
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/cmdline.c | 6
-rw-r--r--  lib/stackdepot.c | 284
-rw-r--r--  lib/swiotlb.c | 6
-rw-r--r--  mm/compaction.c | 44
-rw-r--r--  mm/debug.c | 13
-rw-r--r--  mm/huge_memory.c | 6
-rw-r--r--  mm/internal.h | 2
-rw-r--r--  mm/list_lru.c | 14
-rw-r--r--  mm/migrate.c | 12
-rw-r--r--  mm/page_alloc.c | 83
-rw-r--r--  mm/page_isolation.c | 9
-rw-r--r--  mm/page_owner.c | 263
-rw-r--r--  mm/swap_cgroup.c | 2
-rw-r--r--  mm/vmstat.c | 17
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/caif/cfpkt_skbuff.c | 6
-rw-r--r--  net/core/dev.c | 10
-rw-r--r--  net/core/dst.c | 14
-rw-r--r--  net/core/rtnetlink.c | 5
-rw-r--r--  net/decnet/dn_route.c | 14
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c | 4
-rw-r--r--  net/dsa/slave.c | 6
-rw-r--r--  net/ipv4/igmp.c | 22
-rw-r--r--  net/ipv6/addrconf.c | 6
-rw-r--r--  net/ipv6/datagram.c | 5
-rw-r--r--  net/ipv6/fib6_rules.c | 22
-rw-r--r--  net/ipv6/ip6_fib.c | 3
-rw-r--r--  net/ipv6/ip6_output.c | 6
-rw-r--r--  net/ipv6/udp.c | 24
-rw-r--r--  net/key/af_key.c | 17
-rw-r--r--  net/mac80211/main.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 4
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 6
-rw-r--r--  net/rxrpc/ar-key.c | 64
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/unix/af_unix.c | 7
-rw-r--r--  net/wireless/db.txt | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 47
-rw-r--r--  security/pfe/pfk_kc.c | 61
-rw-r--r--  security/selinux/hooks.c | 1
-rw-r--r--  sound/pci/hda/hda_codec.h | 2
-rw-r--r--  sound/pci/hda/hda_controller.c | 8
-rw-r--r--  sound/pci/hda/hda_generic.c | 1
-rw-r--r--  sound/soc/codecs/msm_sdw/msm_sdw_cdc.c | 13
-rw-r--r--  sound/soc/codecs/wcd-mbhc-v2.c | 3
-rw-r--r--  sound/soc/codecs/wcd9335.c | 4
-rw-r--r--  sound/soc/codecs/wsa881x.c | 1
-rw-r--r--  sound/soc/msm/apq8096-auto.c | 17
-rw-r--r--  sound/soc/msm/msm-dai-fe.c | 148
-rw-r--r--  sound/soc/msm/msm8998.c | 112
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c | 57
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c | 150
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c | 264
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 726
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h | 31
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-qti-pp-config.c | 10
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c | 14
-rw-r--r--  sound/soc/msm/qdsp6v2/q6adm.c | 114
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 320
-rw-r--r--  tools/perf/util/probe-finder.c | 10
-rw-r--r--  tools/vm/page_owner_sort.c | 9
446 files changed, 21046 insertions, 3201 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/diag_mhi.txt b/Documentation/devicetree/bindings/arm/msm/diag_mhi.txt
new file mode 100644
index 000000000000..02f5103d5563
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/diag_mhi.txt
@@ -0,0 +1,12 @@
+Qualcomm Technologies, Inc. Diag MHI Driver
+
+Required properties:
+-compatible : should be "qcom,diag-mhi".
+-qcom,mhi : phandle of MHI Device to connect to.
+
+Example:
+ qcom,diag {
+ compatible = "qcom,diag-mhi";
+ qcom,mhi = <&mhi_wlan>;
+ };
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index d442cf02a816..41cbb91351bb 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -86,6 +86,9 @@ SoCs:
- MSM8998
compatible = "qcom,msm8998"
+- MSM8998_9x55
+ compatible = "qcom,msm8998-9x55"
+
- MSMHAMSTER
compatible = "qcom,msmhamster"
@@ -270,6 +273,8 @@ compatible = "qcom,msm8998-rumi"
compatible = "qcom,msm8998-cdp"
compatible = "qcom,msm8998-mtp"
compatible = "qcom,msm8998-qrd"
+compatible = "qcom,msm8998-9x55-cdp"
+compatible = "qcom,msm8998-9x55-mtp"
compatible = "qcom,msmhamster-rumi"
compatible = "qcom,msmhamster-cdp"
compatible = "qcom,msmhamster-mtp"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt
index de5ab2c37967..2ef119e74bda 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt
@@ -1,18 +1,20 @@
Qualcomm Technologies, Inc. IPC Router MHI Transport
Required properties:
--compatible: should be "qcom,ipc_router_mhi_xprt"
--qcom,out-chan-id: MHI Channel ID for the transmit path
--qcom,in-chan-id: MHI Channel ID for the receive path
--qcom,xprt-remote: string that defines the edge of the transport (PIL Name)
+-compatible: should be "qcom,ipc_router_mhi_xprt".
+-qcom,mhi: phandle of MHI Device to connect to.
+-qcom,out-chan-id: MHI Channel ID for the transmit path.
+-qcom,in-chan-id: MHI Channel ID for the receive path.
+-qcom,xprt-remote: string that defines the edge of the transport (PIL Name).
-qcom,xprt-linkid: unique integer to identify the tier to which the link
belongs to in the network and is used to avoid the
- routing loops while forwarding the broadcast messages
--qcom,xprt-version: unique version ID used by MHI transport header
+ routing loops while forwarding the broadcast messages.
+-qcom,xprt-version: unique version ID used by MHI transport header.
Example:
qcom,ipc_router_external_modem_xprt2 {
compatible = "qcom,ipc_router_mhi_xprt";
+ qcom,mhi = <&mhi_wlan>;
qcom,out-chan-id = <34>;
qcom,in-chan-id = <35>;
qcom,xprt-remote = "external-modem";
diff --git a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
index ce2d8bd54e43..1114308f9436 100644
--- a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
@@ -2,12 +2,15 @@ Qualcomm Technologies, Inc. Remote Debugger (RDBG) driver
Required properties:
-compatible : Should be one of
- To communicate with modem
+ To communicate with adsp
qcom,smp2pgpio_client_rdbg_2_in (inbound)
qcom,smp2pgpio_client_rdbg_2_out (outbound)
To communicate with modem
qcom,smp2pgpio_client_rdbg_1_in (inbound)
qcom,smp2pgpio_client_rdbg_1_out (outbound)
+ To communicate with cdsp
+ qcom,smp2pgpio_client_rdbg_5_in (inbound)
+ qcom,smp2pgpio_client_rdbg_5_out (outbound)
-gpios : the relevant gpio pins of the entry.
Example:
diff --git a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
index 6d63d1123f4c..6aa3bfe4b1d8 100644
--- a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
+++ b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
@@ -11,8 +11,9 @@ the WLAN enable GPIO, 3.3V fixed voltage regulator resources. It also
provides the reserved RAM dump memory location and size.
Required properties:
- - compatible: "qcom,cnss"
- - wlan-en-gpio: WLAN_EN GPIO signal specified by QCA6174 specifications
+ - compatible: "qcom,cnss" for QCA6174 device
+ "qcom,cnss-qca6290" for QCA6290 device
+ - wlan-en-gpio: WLAN_EN GPIO signal specified by the chip specifications
- vdd-wlan-supply: phandle to the regulator device tree node
- pinctrl-names: Names corresponding to the numbered pinctrl states
- pinctrl-<n>: Pinctrl states as described in
@@ -44,6 +45,13 @@ Optional properties:
which should be drived depending on platforms
- qcom,is-dual-wifi-enabled: Boolean property to control wlan enable(wlan-en)
gpio on dual-wifi platforms.
+ - vdd-wlan-en-supply: WLAN_EN fixed regulator specified by QCA6174 specifications.
+ - qcom,wlan-en-vreg-support: Boolean property to decide whether the WLAN_EN pin
+ is a gpio or fixed regulator.
+ - qcom,mhi: phandle to indicate the device which needs MHI support.
+ - qcom,cap-tsf-gpio: WLAN_TSF_CAPTURED GPIO signal specified by the chip
+ specifications, should be driven depending on
+ products
Example:
@@ -60,4 +68,6 @@ Example:
pinctrl-0 = <&cnss_default>;
qcom,wlan-rc-num = <0>;
qcom,wlan-smmu-iova-address = <0 0x10000000>;
+ qcom,mhi = <&mhi_wlan>;
+ qcom,cap-tsf-gpio = <&tlmm 126 1>;
};
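For the regulator-based WLAN_EN option described above, a minimal sketch could
look as follows; wlan_en_vreg refers to the board's fixed regulator node (one is
added for msm8996 elsewhere in this patch), while the main supply phandle below
is purely illustrative:

	qcom,cnss {
		compatible = "qcom,cnss";
		/* WLAN_EN is driven through a fixed regulator instead of a gpio */
		qcom,wlan-en-vreg-support;
		vdd-wlan-en-supply = <&wlan_en_vreg>;
		/* illustrative main WLAN supply phandle */
		vdd-wlan-supply = <&wlan_vreg>;
		qcom,wlan-rc-num = <0>;
	};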
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index c801e8486f87..700a8f7b077e 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -28,6 +28,7 @@ Optional properties:
- qcom,icnss-vadc: VADC handle for vph_pwr read APIs.
- qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs.
- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
+ - qcom,wlan-msa-fixed-region: phandle, specifier pairs to children of /reserved-memory
Example:
@@ -54,6 +55,7 @@ Example:
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x200000>;
+ qcom,wlan-msa-fixed-region = <&wlan_msa_mem>;
qcom,smmu-s1-bypass;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
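The wlan_msa_mem phandle referenced above is expected to point at a child of
/reserved-memory; a minimal sketch, with a placeholder base address and the
2 MB size implied by qcom,wlan-msa-memory:

	reserved-memory {
		wlan_msa_mem: wlan-msa-region@96000000 {
			/* base address is a placeholder; size mirrors qcom,wlan-msa-memory */
			reg = <0 0x96000000 0 0x200000>;
			no-map;
		};
	};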
diff --git a/Documentation/devicetree/bindings/drm/msm/mdp.txt b/Documentation/devicetree/bindings/drm/msm/mdp.txt
index 3a6db0553fe3..a76b604445bd 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdp.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdp.txt
@@ -3,6 +3,7 @@ Qualcomm Technologies,Inc. Adreno/Snapdragon display controller
Required properties:
Optional properties:
+- contiguous-region: reserved memory for HDMI and DSI buffer.
- qcom,sde-plane-id-map: plane id mapping for virtual plane.
- qcom,sde-plane-id: each virtual plane mapping node.
- reg: reg property.
@@ -17,6 +18,8 @@ Optional properties:
Example:
&mdss_mdp {
+ contiguous-region = <&cont_splash_mem &cont_splash_mem_hdmi>;
+
qcom,sde-plane-id-map {
qcom,sde-plane-id@0 {
reg = <0x0>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index 9fb84020add7..bb413af4b54d 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -123,6 +123,9 @@ Optional properties:
- qcom,gpio-vdig : should contain index to gpio used by sensors digital vreg enable
- qcom,gpio-vaf : should contain index to gpio used by sensors af vreg enable
- qcom,gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n
+- qcom,gpio-custom1 : should contain index to gpio used by sensors specific to usecase
+- qcom,gpio-custom2 : should contain index to gpio used by sensors specific to usecase
+- qcom,gpio-custom3 : should contain index to gpio used by sensors specific to usecase
- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
qcom,gpio-req-tbl-num property (in the same order)
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.txt
new file mode 100644
index 000000000000..8a9514153172
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.txt
@@ -0,0 +1,201 @@
+Qualcomm Technologies, Inc. MSM8998 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+MSM8998 platform. With the new GPIO tiling, GPIO pins are
+grouped into various cores - NORTH, WEST and EAST. The value of a pin's
+TLMM_GPIO_ID_STATUSn register determines which core it belongs to.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,msm8998-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode. Valid pins are:
+ gpio0-gpio149,
+ sdc1_clk,
+ sdc1_cmd,
+ sdc1_data,
+ sdc2_clk,
+ sdc2_cmd,
+ sdc2_data,
+ sdc1_rclk
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+
+ blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens,
+ bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8,
+ qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b,
+ dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10,
+ blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12,
+ mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11,
+ atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char,
+ cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b,
+ pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c,
+ qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4,
+ qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5,
+ atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6,
+ atest_usb20, atest_char0, dac_calib10, qdss_stm10,
+ qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6,
+ blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11,
+ qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1,
+ qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11,
+ dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6,
+ qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14,
+ dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem,
+ dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto,
+ dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0,
+ dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25,
+ sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2,
+ qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3,
+ uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, blsp_i2c9,
+ blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7,
+ qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11,
+ blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0,
+ cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4,
+ blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4,
+ qdss_stm14, dac_calib26, spkr_i2s, audio_ref, lpass_slimbus,
+ isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s,
+ qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b,
+ sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b,
+ gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12,
+ qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29,
+ tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27,
+ qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk,
+ sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b,
+ sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b,
+ ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b,
+ blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt,
+ pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11,
+ qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx,
+ qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3,
+ gpio
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@01010000 {
+ compatible = "qcom,msm8998-pinctrl";
+ reg = <0x01010000 0x300000>;
+ interrupts = <0 208 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ uart_console_active: uart_console_active {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart8";
+ };
+
+ config {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
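A client device then picks up a state defined this way through the common
pinctrl consumer properties; the consumer node below is illustrative, only the
pinctrl hookup matters:

	uart_console: serial@c1b0000 {
		/* illustrative consumer; selects the state defined above */
		pinctrl-names = "default";
		pinctrl-0 = <&uart_console_active>;
	};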
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 468db388b0a6..3527779ef93c 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -85,21 +85,6 @@ Charger specific properties:
maximum charge current in mA for each thermal
level.
-- qcom,step-soc-thresholds
- Usage: optional
- Value type: Array of <u32>
- Definition: Array of SOC threshold values, size of 4. This should be a
- flat array that denotes the percentage ranging from 0 to 100.
- If the array is not present, step charging is disabled.
-
-- qcom,step-current-deltas
- Usage: optional
- Value type: Array of <s32>
- Definition: Array of delta values for charging current, size of 5, with
- FCC as base. This should be a flat array that denotes the
- offset of charging current in uA, from -3100000 to 3200000.
- If the array is not present, step charging is disabled.
-
- io-channels
Usage: optional
Value type: List of <phandle u32>
@@ -182,6 +167,18 @@ Charger specific properties:
Definition: Specifies the deglitch interval for OTG detection.
If the value is not present, 50 msec is used as default.
+- qcom,step-charging-enable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables step-charging.
+
+- qcom,wd-bark-time-secs
+ Usage: optional
+ Value type: <u32>
+ Definition: WD bark-timeout in seconds. The possible values are
+ 16, 32, 64 or 128. If not defined, it defaults to 64.
+
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
@@ -217,9 +214,6 @@ pmi8998_charger: qcom,qpnp-smb2 {
dpdm-supply = <&qusb_phy0>;
- qcom,step-soc-thresholds = <60 70 80 90>;
- qcom,step-current-deltas = <500000 250000 150000 0 (-150000)>;
-
qcom,chgr@1000 {
reg = <0x1000 0x100>;
interrupts = <0x2 0x10 0x0 IRQ_TYPE_NONE>,
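With the flat step-charging arrays removed above, a charger node would carry
the new boolean and watchdog properties instead; a minimal sketch:

	pmi8998_charger: qcom,qpnp-smb2 {
		/* other required charger properties and peripherals omitted for brevity */
		qcom,step-charging-enable;
		qcom,wd-bark-time-secs = <64>;
	};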
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 38e056cdc0ee..db21a2b58c2b 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1311,6 +1311,13 @@ Optional properties:
- pinctrl-x: Defines pinctrl state for each pin group.
+ - qcom,msm-cpudai-tdm-clk-attribute: Clock attribute for tdm.
+ 0 - Clk invalid attribute
+ 1 - Clk attribute couple no
+ 2 - Clk attribute couple dividend
+ 3 - Clk attribute couple divisor
+ 4 - Clk attribute invert couple no
+
Example:
qcom,msm-dai-tdm-quat-rx {
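As a sketch, the new clock-attribute property sits alongside the existing TDM
DAI properties; the value 1 selects "couple no" per the list above:

	qcom,msm-dai-tdm-quat-rx {
		/* existing TDM DAI properties omitted; 1 = clk attribute couple no */
		qcom,msm-cpudai-tdm-clk-attribute = <1>;
	};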
diff --git a/Documentation/vm/page_owner.txt b/Documentation/vm/page_owner.txt
index 8f3ce9b3aa11..ffff1439076a 100644
--- a/Documentation/vm/page_owner.txt
+++ b/Documentation/vm/page_owner.txt
@@ -28,10 +28,11 @@ with page owner and page owner is disabled in runtime due to no enabling
boot option, runtime overhead is marginal. If disabled in runtime, it
doesn't require memory to store owner information, so there is no runtime
memory overhead. And, page owner inserts just two unlikely branches into
-the page allocator hotpath and if it returns false then allocation is
-done like as the kernel without page owner. These two unlikely branches
-would not affect to allocation performance. Following is the kernel's
-code size change due to this facility.
+the page allocator hotpath and, if not enabled, allocation is done
+as in a kernel without page owner. These two unlikely branches should
+not affect allocation performance, especially if the static keys jump
+label patching functionality is available. Following is the kernel's code
+size change due to this facility.
- Without page owner
text data bss dec hex filename
diff --git a/Makefile b/Makefile
index 85f821d8d07c..64e9303560f7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 74
+SUBLEVEL = 76
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -637,6 +637,12 @@ endif
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
ifdef CONFIG_READABLE_ASM
# Disable optimizations that make assembler listings hard to read.
# reorder blocks reorders the control in the function
@@ -792,12 +798,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 6f50f672efbd..de8ac998604d 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -54,14 +54,14 @@
timer@0200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x0200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&clk_periph>;
};
local-timer@0600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x0600 0x100>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
clocks = <&clk_periph>;
};
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index c938988d6634..297d6535382e 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -170,7 +170,10 @@ dtb-$(CONFIG_ARCH_MSM8998) += msm8998-sim.dtb \
apq8098-v2.1-mediabox.dtb \
msm8998-v2.1-interposer-sdm660-cdp.dtb \
msm8998-v2.1-interposer-sdm660-mtp.dtb \
- msm8998-v2.1-interposer-sdm660-qrd.dtb
+ msm8998-v2.1-interposer-sdm660-qrd.dtb \
+ msm8998-9x55-rcm.dtb \
+ msm8998-9x55-cdp.dtb \
+ msm8998-9x55-mtp.dtb
endif
dtb-$(CONFIG_ARCH_MSMHAMSTER) += msmhamster-rumi.dtb
diff --git a/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi b/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi
new file mode 100644
index 000000000000..5e382f307530
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi
@@ -0,0 +1,75 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+
+dsi_adv7533_1024_600p: qcom,mdss_dsi_adv7533_1024_600p {
+ label = "adv7533 1024x600p video mode dsi panel";
+ qcom,mdss-dsi-panel-name = "dsi_adv7533_1024_600p";
+ qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-destination = "display_1";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1024>;
+ qcom,mdss-dsi-panel-height = <600>;
+ qcom,mdss-dsi-h-front-porch = <110>;
+ qcom,mdss-dsi-h-back-porch = <220>;
+ qcom,mdss-dsi-h-pulse-width = <40>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <20>;
+ qcom,mdss-dsi-v-front-porch = <5>;
+ qcom,mdss-dsi-v-pulse-width = <5>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [
+ 05 01 00 00 c8 00 02 11 00
+ 05 01 00 00 0a 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00
+ 05 01 00 00 00 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <1>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-panel-timings = [
+ AB 1A 10 00 3E 43 16 1E 15 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x03>;
+ qcom,mdss-dsi-t-clk-pre = <0x20>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+ qcom,mdss-pan-physical-width-dimension = <160>;
+ qcom,mdss-pan-physical-height-dimension = <90>;
+ qcom,mdss-dsi-force-clock-lane-hs;
+ qcom,mdss-dsi-always-on;
+ qcom,mdss-dsi-panel-timings-phy-v2 = [1c 19 02 03 01 03 04 a0
+ 1c 19 02 03 01 03 04 a0
+ 1c 19 02 03 01 03 04 a0
+ 1c 19 02 03 01 03 04 a0
+ 1c 08 02 03 01 03 04 a0];
+ qcom,dba-panel;
+ qcom,bridge-name = "adv7533";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
index 0f18ba5c94c7..9cd117ce4e0c 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
@@ -250,9 +250,8 @@
<0xd900 0x100>;
reg-names = "qpnp-wled-ctrl-base",
"qpnp-wled-sink-base";
- interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
- <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "ovp-irq", "sc-irq";
+ interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ovp-irq";
linux,name = "wled";
linux,default-trigger = "bkl-trigger";
qcom,fdbk-output = "auto";
@@ -268,7 +267,6 @@
qcom,fs-curr-ua = <25000>;
qcom,cons-sync-write-delay-us = <1000>;
qcom,led-strings-list = [00 01 02];
- qcom,en-ext-pfet-sc-pro;
qcom,loop-auto-gm-en;
qcom,pmic-revid = <&pm660l_revid>;
status = "ok";
diff --git a/arch/arm/boot/dts/qcom/msm-rdbg.dtsi b/arch/arm/boot/dts/qcom/msm-rdbg.dtsi
index d0c91f9e72ae..6de1a8e2fb7e 100644
--- a/arch/arm/boot/dts/qcom/msm-rdbg.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-rdbg.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -72,4 +72,35 @@
compatible = "qcom,smp2pgpio_client_rdbg_1_out";
gpios = <&smp2pgpio_rdbg_1_out 0 0>;
};
+
+ smp2pgpio_rdbg_5_in: qcom,smp2pgpio-rdbg-5-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <5>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_5_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_5_in";
+ gpios = <&smp2pgpio_rdbg_5_in 0 0>;
+ };
+
+ smp2pgpio_rdbg_5_out: qcom,smp2pgpio-rdbg-5-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_5_out {
+ compatible = "qcom,smp2pgpio_client_rdbg_5_out";
+ gpios = <&smp2pgpio_rdbg_5_out 0 0>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 3b55215c7e5d..9dbec1e87fda 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -547,7 +547,7 @@
#include "msm8996-sde-display.dtsi"
-&mdss_mdp {
+&sde_kms {
qcom,mdss-pref-prim-intf = "dsi";
qcom,sde-plane-id-map {
qcom,sde-plane-id@0 {
@@ -869,6 +869,9 @@
qcom,ntn-rst-delay-msec = <100>;
qcom,ntn-rc-num = <1>;
+ qcom,ntn-bus-num = <1>;
+ qcom,ntn-mdio-bus-id = <1>;
+ qcom,ntn-phy-addr = <7>;
qcom,msm-bus,name = "ntn";
qcom,msm-bus,num-cases = <2>;
@@ -1238,7 +1241,7 @@
qcom,vin-sel = <2>; /* 1.8 */
qcom,out-strength = <1>;
qcom,src-sel = <0>; /* GPIO */
- qcom,master-en = <0>; /* Disable GPIO */
+ qcom,master-en = <1>; /* Enable GPIO */
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index 4fe6f0d67fbe..ad14bfd00cd1 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -333,7 +333,7 @@
};
};
-&mdss_mdp {
+&sde_kms {
qcom,mdss-pref-prim-intf = "dsi";
qcom,sde-plane-id-map {
qcom,sde-plane-id@0 {
@@ -635,6 +635,8 @@
qcom,ntn-rst-delay-msec = <100>;
qcom,ntn-rc-num = <1>;
qcom,ntn-bus-num = <1>;
+ qcom,ntn-mdio-bus-id = <1>;
+ qcom,ntn-phy-addr = <7>;
qcom,msm-bus,name = "ntn";
qcom,msm-bus,num-cases = <2>;
@@ -649,6 +651,7 @@
qcom,ntn-rst-delay-msec = <100>;
qcom,ntn-rc-num = <2>;
qcom,ntn-bus-num = <1>;
+ qcom,ntn-mdio-bus-id = <2>;
qcom,msm-bus,name = "ntn";
qcom,msm-bus,num-cases = <2>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi
index 18a0f29e4d8a..94be1082c2be 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#include "dsi-panel-nt35597-dsc-wqxga-cmd.dtsi"
#include "dsi-panel-hx8379a-truly-fwvga-video.dtsi"
#include "dsi-panel-r69007-dualdsi-wqxga-cmd.dtsi"
+#include "dsi-adv7533-1024-600p.dtsi"
#include "dsi-adv7533-720p.dtsi"
#include "dsi-adv7533-1080p.dtsi"
#include "dsi-panel-nt35950-dsc-4k-cmd.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
index ab10a71d1fd7..0bd9b02f3d2e 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -343,7 +343,7 @@
qcom,mdss-pref-prim-intf = "dsi";
};
-&mdss_hdmi {
+&sde_hdmi {
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi b/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi
index e1921c3baeb3..936dfd4d1cb2 100644
--- a/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi
@@ -1918,6 +1918,13 @@
gpio = <&pm8994_gpios 9 0>;
};
+ wlan_en_vreg: wlan_en_vreg {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan_en_vreg";
+ enable-active-high;
+ gpio = <&pm8994_gpios 8 0>;
+ };
+
hl7509_en_vreg: hl7509_en_vreg {
compatible = "regulator-fixed";
regulator-name = "hl7509_en_vreg";
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
index 061301f1c479..1c81bc433374 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
@@ -94,8 +94,8 @@
label = "dsi_dual_sharp_video";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
- qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ qcom,dsi-ctrl = <&sde_dsi0 &sde_dsi1>;
+ qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -118,8 +118,8 @@
label = "single_dsi_sim";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0>;
- qcom,dsi-phy = <&mdss_dsi_phy0>;
+ qcom,dsi-ctrl = <&sde_dsi0>;
+ qcom,dsi-phy = <&sde_dsi_phy0>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -140,8 +140,8 @@
label = "single_dsi_toshiba_720p";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0>;
- qcom,dsi-phy = <&mdss_dsi_phy0>;
+ qcom,dsi-ctrl = <&sde_dsi0>;
+ qcom,dsi-phy = <&sde_dsi_phy0>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -161,8 +161,8 @@
label = "single_dsi_jdi_1080p";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0>;
- qcom,dsi-phy = <&mdss_dsi_phy0>;
+ qcom,dsi-ctrl = <&sde_dsi0>;
+ qcom,dsi-phy = <&sde_dsi_phy0>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -180,8 +180,8 @@
label = "single_dsi_sharp_1080p";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0>;
- qcom,dsi-phy = <&mdss_dsi_phy0>;
+ qcom,dsi-ctrl = <&sde_dsi0>;
+ qcom,dsi-phy = <&sde_dsi_phy0>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -209,8 +209,8 @@
qcom,display-type = "primary";
/* dsi1/dsi0 swapped due to IMGSWAP */
- qcom,dsi-ctrl = <&mdss_dsi1 &mdss_dsi0>;
- qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ qcom,dsi-ctrl = <&sde_dsi1 &sde_dsi0>;
+ qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -231,8 +231,8 @@
label = "dsi_dual_nt35597_video";
qcom,display-type = "primary";
- qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
- qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ qcom,dsi-ctrl = <&sde_dsi0 &sde_dsi1>;
+ qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -253,8 +253,8 @@
label = "dsi_adv_7533_1";
qcom,display-type = "secondary";
- qcom,dsi-ctrl = <&mdss_dsi0>;
- qcom,dsi-phy = <&mdss_dsi_phy0>;
+ qcom,dsi-ctrl = <&sde_dsi0>;
+ qcom,dsi-phy = <&sde_dsi_phy0>;
clocks = <&clock_mmss clk_ext_byte0_clk_src>,
<&clock_mmss clk_ext_pclk0_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -269,8 +269,8 @@
label = "dsi_adv_7533_2";
qcom,display-type = "tertiary";
- qcom,dsi-ctrl = <&mdss_dsi1>;
- qcom,dsi-phy = <&mdss_dsi_phy1>;
+ qcom,dsi-ctrl = <&sde_dsi1>;
+ qcom,dsi-phy = <&sde_dsi_phy1>;
clocks = <&clock_mmss clk_ext_byte1_clk_src>,
<&clock_mmss clk_ext_pclk1_clk_src>;
clock-names = "src_byte_clk", "src_pixel_clk";
@@ -297,8 +297,8 @@
};
};
-&mdss_mdp {
- connectors = <&mdss_hdmi &sde_hdmi &dsi_adv_7533_1 &dsi_adv_7533_2>;
+&sde_kms {
+ connectors = <&sde_hdmi_tx &sde_hdmi &dsi_adv_7533_1 &dsi_adv_7533_2>;
};
&dsi_dual_sharp_video {
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
index f0fa5dcb2224..b0688668e667 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
@@ -11,7 +11,7 @@
*/
&soc {
- mdss_mdp: qcom,mdss_mdp@900000 {
+ sde_kms: qcom,sde_kms@900000 {
compatible = "qcom,sde-kms";
reg = <0x00900000 0x90000>,
<0x009b0000 0x1040>,
@@ -20,6 +20,8 @@
"vbif_phys",
"vbif_nrt_phys";
+ contiguous-region = <&cont_splash_mem &cont_splash_mem_hdmi>;
+
/* clock and supply entries */
clocks = <&clock_mmss clk_mdss_ahb_clk>,
<&clock_mmss clk_mdss_axi_clk>,
@@ -180,8 +182,8 @@
};
};
- smmu_mdp_unsec: qcom,smmu_mdp_unsec_cb {
- compatible = "qcom,smmu_mdp_unsec";
+ smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+ compatible = "qcom,smmu_kms_unsec";
iommus = <&mdp_smmu 0>;
};
@@ -211,7 +213,7 @@
};
};
- mdss_dsi0: qcom,mdss_dsi_ctrl0@994000 {
+ sde_dsi0: qcom,sde_dsi_ctrl0@994000 {
compatible = "qcom,dsi-ctrl-hw-v1.4";
label = "dsi-ctrl-0";
cell-index = <0>;
@@ -246,7 +248,7 @@
<22 512 0 0>,
<22 512 0 1000>;
- interrupt-parent = <&mdss_mdp>;
+ interrupt-parent = <&sde_kms>;
interrupts = <4 0>;
qcom,core-supply-entries {
#address-cells = <1>;
@@ -287,7 +289,7 @@
};
};
- mdss_dsi1: qcom,mdss_dsi_ctrl1@996000 {
+ sde_dsi1: qcom,sde_dsi_ctrl1@996000 {
compatible = "qcom,dsi-ctrl-hw-v1.4";
label = "dsi-ctrl-1";
cell-index = <1>;
@@ -321,7 +323,7 @@
<22 512 0 0>,
<22 512 0 1000>;
- interrupt-parent = <&mdss_mdp>;
+ interrupt-parent = <&sde_kms>;
interrupts = <5 0>;
qcom,core-supply-entries {
#address-cells = <1>;
@@ -361,7 +363,7 @@
};
};
- mdss_dsi_phy0: qcom,mdss_dsi_phy0@994400 {
+ sde_dsi_phy0: qcom,sde_dsi_phy0@994400 {
compatible = "qcom,dsi-phy-v4.0";
label = "dsi-phy-0";
cell-index = <0>;
@@ -420,7 +422,7 @@
};
};
- mdss_dsi_phy1: qcom,mdss_dsi_phy1@996400 {
+ sde_dsi_phy1: qcom,sde_dsi_phy1@996400 {
compatible = "qcom,dsi-phy-v4.0";
label = "dsi-phy-1";
cell-index = <1>;
@@ -479,7 +481,7 @@
};
};
- mdss_hdmi: qcom,hdmi_tx@9a0000 {
+ sde_hdmi_tx: qcom,hdmi_tx_8996@9a0000 {
compatible = "qcom,hdmi-tx-8996";
reg = <0x009a0000 0x50c>,
@@ -499,7 +501,7 @@
"core_clk",
"alt_iface_clk",
"extp_clk";
- interrupt-parent = <&mdss_mdp>;
+ interrupt-parent = <&sde_kms>;
interrupts = <8 0>;
hpd-gdsc-supply = <&gdsc_mdss>;
qcom,hdmi-tx-hpd-gpio = <&pm8994_mpps 4 0>;
@@ -511,23 +513,8 @@
&mdss_hdmi_ddc_suspend
&mdss_hdmi_cec_suspend>;
- hdmi_audio: qcom,msm-hdmi-audio-rx {
+ sde_hdmi_audio: qcom,sde-hdmi-audio-rx {
compatible = "qcom,msm-hdmi-audio-codec-rx";
};
};
};
-
-/* dummy nodes for compatibility with 8996 mdss dtsi */
-&soc {
- mdss_dsi: qcom,mdss_dsi_dummy {
- /* dummy node for backward compatibility */
- };
-
- mdss_hdmi_tx: qcom,mdss_hdmi_tx_dummy {
- /* dummy node for backward compatibility */
- };
-
- mdss_fb2: qcom,mdss_fb2_dummy {
- /* dummy node for backward compatibility */
- };
-};
diff --git a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi
index 9725bc3ee530..698c0193a164 100644
--- a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -480,7 +480,7 @@
gdsc-venus-supply = <&gdsc_venus>;
};
-&mdss_hdmi {
+&sde_hdmi_tx {
hpd-gdsc-venus-supply = <&gdsc_venus>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index 80b3437beac6..dcd884b4a745 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -242,6 +242,7 @@
#include "msm8996-ion.dtsi"
#include "msm8996-sde.dtsi"
+#include "msm8996-mdss.dtsi"
#include "msm8996-mdss-pll.dtsi"
#include "msm8996-smp2p.dtsi"
#include "msm8996-ipcrouter.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
index 7f6f3d5d4a4c..d2aa5c854c83 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,7 @@
model = "Qualcomm Technologies, Inc. MSM 8996pro AUTO ADP";
compatible = "qcom,msm8996-adp", "qcom,msm8996", "qcom,adp";
qcom,msm-id = <315 0x10000>;
- qcom,board-id = <0x02010019 0>;
+ qcom,board-id = <0x02010019 0>, <0x00010001 0>;
};
&spi_9 {
diff --git a/arch/arm/boot/dts/qcom/msm8996pro.dtsi b/arch/arm/boot/dts/qcom/msm8996pro.dtsi
index 252940c9c3e5..b9a2ccb973f2 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996pro.dtsi
@@ -22,7 +22,7 @@
qcom,msm-id = <305 0x10000>;
chosen {
- bootargs = "fpsimd.fpsimd_settings=1 app_setting.use_app_setting=0 app_setting.use_32bit_app_setting_pro=1";
+ bootargs = "lpm_levels.sleep_disabled=1 fpsimd.fpsimd_settings=1 app_setting.use_app_setting=0 app_setting.use_32bit_app_setting_pro=1";
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi
index 32adb9a36dd4..355062adf7ef 100644
--- a/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016 - 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -167,3 +167,40 @@
< 560000000 7 >,
< 624000000 7 >;
};
+
+&soc {
+ ipa_hw: qcom,ipa@680000 {
+ compatible = "qcom,ipa";
+ reg = <0x680000 0x4effc>,
+ <0x684000 0x26934>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 333 0>,
+ <0 432 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+ qcom,ipa-hw-ver = <5>; /* IPA core version = IPAv2.5 */
+ qcom,ipa-hw-mode = <0>;
+ qcom,ee = <0>;
+ qcom,use-ipa-tethering-bridge;
+ qcom,ipa-bam-remote-mode;
+ qcom,modem-cfg-emb-pipe-flt;
+ clocks = <&clock_gcc clk_ipa_clk>;
+ clock-names = "core_clk";
+ qcom,use-dma-zone;
+ qcom,msm-bus,name = "ipa";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <90 512 0 0>, <90 585 0 0>, /* No vote */
+ <90 512 80000 640000>, <90 585 80000 640000>, /* SVS */
+ <90 512 206000 960000>, <90 585 206000 960000>; /* PERF */
+ qcom,bus-vector-names = "MIN", "SVS", "PERF";
+ };
+
+ qcom,rmnet-ipa {
+ compatible = "qcom,rmnet-ipa";
+ qcom,rmnet-ipa-ssr;
+ qcom,ipa-loaduC;
+ qcom,ipa-advertise-sg-support;
+ };
+};
+
diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts
new file mode 100644
index 000000000000..cf167897bb89
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8998-9x55.dtsi"
+#include "msm8998-mdss-panels.dtsi"
+#include "msm8998-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8998-9x55 CDP";
+ compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp";
+ qcom,board-id = <1 2>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts
new file mode 100644
index 000000000000..a95e9e4f272f
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8998-9x55.dtsi"
+#include "msm8998-mdss-panels.dtsi"
+#include "msm8998-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8998-9x55 MTP";
+ compatible = "qcom,msm8998-9x55-mtp", "qcom,msm8998-9x55", "qcom,mtp";
+ qcom,board-id = <8 6>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts
new file mode 100644
index 000000000000..094ecbc50061
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8998-9x55.dtsi"
+#include "msm8998-mdss-panels.dtsi"
+#include "msm8998-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8998-9x55 RCM";
+ compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp";
+ qcom,board-id = <0x21 2>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi b/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi
new file mode 100644
index 000000000000..be947507e398
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include "skeleton64.dtsi"
+#include "msm8998-v2.1.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8998-9x55";
+ compatible = "qcom,msm8998-9x55";
+ qcom,msm-id = <292 0x0>;
+ interrupt-parent = <&intc>;
+
+ soc: soc { };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi
index 14567c3b5010..2af3bf277096 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-qrd-vr1.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -345,6 +345,60 @@
clock-names = "cam_src_clk", "cam_clk";
qcom,clock-rates = <24000000 0>;
};
+
+ /* ToF Camera */
+ qcom,camera@3 {
+ cell-index = <3>;
+ compatible = "qcom,camera";
+ reg = <0x3>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <3>;
+ qcom,mount-angle = <90>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ qcom,cam-vreg-name = "cam_vio";
+ qcom,cam-vreg-min-voltage = <1800000>;
+ qcom,cam-vreg-max-voltage = <1800000>;
+ qcom,cam-vreg-op-mode = <80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk3_active
+ &cam_sensor_depth_v1_active
+ &cam_sensor_depth_v2_active
+ &cam_sensor_depth_default>;
+ pinctrl-1 = <&cam_sensor_mclk3_suspend
+ &cam_sensor_depth_v1_sleep
+ &cam_sensor_depth_v2_sleep
+ &cam_sensor_depth_sleep>;
+ gpios = <&tlmm 16 0>,
+ <&tlmm 24 0>,
+ <&tlmm 21 0>,
+ <&tlmm 28 0>,
+ <&tlmm 23 0>,
+ <&tlmm 7 0>;
+ qcom,gpio-vana = <1>;
+ qcom,gpio-custom2 = <2>;
+ qcom,gpio-reset = <3>;
+ qcom,gpio-custom3 = <4>;
+ qcom,gpio-custom1 = <5>;
+ qcom,gpio-req-tbl-num = <0 1 2 3 4 5>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0 1 1>;
+ qcom,gpio-req-tbl-label =
+ "CAMIF_MCLK3",
+ "CAM_VANA",
+ "CAM_CUSTOM2",
+ "CAM_RESET1",
+ "CAM_CUSTOM3",
+ "CAM_CUSTOM1";
+ qcom,sensor-position = <1>; /* 0 rear */
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>; /* I2C 1 */
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk3_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk3_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
};
&pm8998_gpios {
diff --git a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
index 6ff62544b03c..0859fd638a00 100644
--- a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
@@ -149,6 +149,16 @@
status = "okay";
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
index bafe29211ef0..3827b1bbf8ba 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
@@ -150,6 +150,16 @@
status = "okay";
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
index 71593012148d..ed1259918620 100644
--- a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
@@ -993,6 +993,86 @@
};
};
+ cam_sensor_depth_default: cam_sensor_depth_default {
+ mux {
+ pins = "gpio28","gpio23","gpio7";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28","gpio23","gpio7";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_depth_sleep: cam_sensor_depth_sleep {
+ mux {
+ pins = "gpio28","gpio23","gpio7";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28","gpio23","gpio7";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_depth_v1_active: cam_sensor_depth_v1_active {
+ /* Depth VANA */
+ mux {
+ pins = "gpio24";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio24";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_depth_v1_sleep: cam_sensor_depth_v1_sleep {
+ mux {
+ pins = "gpio24";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio24";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_depth_v2_active: cam_sensor_depth_v2_active {
+ /* Depth CUSTOM2 */
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_depth_v2_sleep: cam_sensor_depth_v2_sleep {
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
cam_sensor_mclk0_active: cam_sensor_mclk0_active {
/* MCLK0 */
mux {
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
index 97c4c5b1d455..c4a428635231 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
@@ -143,6 +143,16 @@
qcom,out-strength = <1>;
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
index a3eb3e5ab0d0..3c6b23d9581c 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
@@ -139,6 +139,16 @@
status = "okay";
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
index 604823ff416e..0f4b462fd57b 100644
--- a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
@@ -170,6 +170,15 @@
qcom,mdss-pref-prim-intf = "dsi";
};
+&mdss_dp_ctrl {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
+ pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 55 0>;
+ qcom,aux-sel-gpio = <&tlmm 56 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 58 0>;
+};
+
&mdss_dsi {
hw-config = "single_dsi";
};
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 56a6d5105568..54f014aa7681 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -321,16 +321,22 @@
#size-cells = <2>;
ranges;
- removed_region0: removed_region0@85800000 {
+ wlan_msa_guard: wlan_msa_guard@85600000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0x0 0x85800000 0x0 0x700000>;
+ reg = <0x0 0x85600000 0x0 0x100000>;
};
- removed_region1: removed_region1@86000000 {
+ wlan_msa_mem: wlan_msa_mem@85700000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0x0 0x86000000 0x0 0x2f00000>;
+ reg = <0x0 0x85700000 0x0 0x100000>;
+ };
+
+ removed_region: removed_region0@85800000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x85800000 0x0 0x3700000>;
};
modem_fw_mem: modem_fw_region@8ac00000 {
@@ -607,6 +613,7 @@
compatible = "qcom,memshare-peripheral";
qcom,peripheral-size = <0x0>;
qcom,client-id = <1>;
+ qcom,allocate-boot-time;
label = "modem";
};
};
@@ -1159,8 +1166,9 @@
< 1113600 762 >,
< 1344000 2086 >,
< 1670400 2929 >,
- < 2150400 3879 >,
- < 2380800 4943 >;
+ < 1881600 3879 >,
+ < 2150400 4943 >,
+ < 2380800 5163 >;
};
devfreq_memlat_4: qcom,arm-memlat-mon-4 {
@@ -1674,6 +1682,7 @@
qcom,vdd-1.3-rfa-config = <1200000 1370000>;
qcom,vdd-3.3-ch0-config = <3200000 3400000>;
qcom,wlan-msa-memory = <0x100000>;
+ qcom,wlan-msa-fixed-region = <&wlan_msa_mem>;
qcom,smmu-s1-bypass;
};
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index 3debda8c084d..f14c9a32c2f9 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -319,6 +319,18 @@
#size-cells = <2>;
ranges;
+ wlan_msa_guard: wlan_msa_guard@85600000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x85600000 0x0 0x100000>;
+ };
+
+ wlan_msa_mem: wlan_msa_mem@85700000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x85700000 0x0 0x100000>;
+ };
+
removed_regions: removed_regions@85800000 {
compatible = "removed-dma-pool";
no-map;
@@ -1928,6 +1940,7 @@
qcom,vdd-1.3-rfa-config = <1200000 1370000>;
qcom,vdd-3.3-ch0-config = <3200000 3400000>;
qcom,wlan-msa-memory = <0x100000>;
+ qcom,wlan-msa-fixed-region = <&wlan_msa_mem>;
qcom,smmu-s1-bypass;
};
@@ -2140,6 +2153,7 @@
qcom,qsee-ce-hw-instance = <0>;
qcom,disk-encrypt-pipe-pair = <2>;
qcom,support-fde;
+ qcom,fde-key-size;
qcom,no-clock-support;
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
index 7cf55acf900b..b108f7d56e57 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
@@ -42,6 +42,13 @@
reg = <0x1 0x0 0 0x10000000>;
label = "ion_system_mem";
};
+ qseecom_mem: qseecom_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1400000>;
+ };
ion_audio: ion_audio_region {
reg = <0 0xc8000000 0 0x00400000>;
label = "ion_audio_mem";
@@ -477,6 +484,22 @@
};
};
+ qcom_seecom: qseecom@86600000 {
+ compatible = "qcom,qseecom";
+ reg = <0x86600000 0x2200000>;
+ reg-names = "secapp-region";
+ qcom,hlos-num-ce-hw-instances = <1>;
+ qcom,hlos-ce-hw-instance = <0>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,no-clock-support;
+ qcom,msm-bus,name = "qseecom-noc";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,ce-opp-freq = <171430000>;
+ qcom,qsee-reentrancy-support = <2>;
+ };
+
hostless: qcom,msm-pcm-hostless {
compatible = "qcom,msm-pcm-hostless";
};
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 221b11bb50e3..f353849d9388 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1197,15 +1197,15 @@ void __init sanity_check_meminfo(void)
high_memory = __va(arm_lowmem_limit - 1) + 1;
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
/*
* Round the memblock limit down to a pmd size. This
* helps to ensure that we will allocate memory from the
* last full pmd, which should be mapped.
*/
- if (memblock_limit)
- memblock_limit = round_down(memblock_limit, PMD_SIZE);
- if (!memblock_limit)
- memblock_limit = arm_lowmem_limit;
+ memblock_limit = round_down(memblock_limit, PMD_SIZE);
memblock_set_current_limit(memblock_limit);
}
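
The reordering above guarantees the limit is rounded down to a pmd boundary even when it falls back to arm_lowmem_limit. A minimal userspace sketch of that flow, with a hypothetical lowmem value and a simplified round_down:

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (2ULL * 1024 * 1024)              /* 2 MiB section size, typical */
#define round_down(x, y) ((x) & ~((y) - 1))        /* y must be a power of two */

int main(void)
{
	uint64_t arm_lowmem_limit = 0x6f300000ULL; /* hypothetical, not pmd aligned */
	uint64_t memblock_limit = 0;               /* "no explicit limit" case */

	if (!memblock_limit)
		memblock_limit = arm_lowmem_limit; /* pick the default first ... */
	memblock_limit = round_down(memblock_limit, PMD_SIZE); /* ... then always round */

	printf("memblock limit = %#llx\n", (unsigned long long)memblock_limit);
	return 0;
}
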
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 5692f0dcd65e..894cb466b075 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -16,7 +16,6 @@ CONFIG_RCU_NOCB_CPU_ALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
-CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index aee323b13802..0a11cd502dbc 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -22,9 +22,9 @@
#define ACPI_MADT_GICC_LENGTH \
(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
-#define BAD_MADT_GICC_ENTRY(entry, end) \
- (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
- (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+ (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c
index 748ad449fc18..dc92b29ac103 100644
--- a/arch/arm64/kernel/perf_trace_counters.c
+++ b/arch/arm64/kernel/perf_trace_counters.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -124,6 +124,7 @@ static ssize_t write_enabled_perftp_file_bool(struct file *file,
char buf[32];
size_t buf_size;
+ buf[0] = 0;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 3cedd1f95e0f..8ae4067a5eda 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -76,14 +76,14 @@ void ath79_ddr_set_pci_windows(void)
{
BUG_ON(!ath79_ddr_pci_win_base);
- __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0);
- __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1);
- __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2);
- __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3);
- __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4);
- __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5);
- __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6);
- __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7);
+ __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
+ __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
+ __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
+ __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
+ __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
+ __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
+ __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
+ __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
}
EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
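
The window registers are 32-bit and the base appears to be a byte-addressed __iomem pointer, so consecutive registers sit at byte offsets 0x0, 0x4, ... 0x1c rather than +1, +2, ... A small stand-alone C illustration of the difference (hypothetical layout, plain memory instead of MMIO):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t regs[8] = { 0 };        /* stand-in for the 8 window registers */
	uint8_t *base = (uint8_t *)regs; /* byte-addressed base, like void __iomem * */

	/* Correct: 32-bit register N lives at byte offset N * 0x4. */
	*(volatile uint32_t *)(base + 2 * 0x4) = 0xdeadbeef;

	/* "base + 2" would land only two bytes in, straddling regs[0] and regs[1]. */
	printf("regs[2] = %#x\n", (unsigned)regs[2]);
	return 0;
}
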
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 7791840cf22c..db07793f7b43 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
+#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -137,6 +138,7 @@ work_pending:
andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
+ TRACE_IRQS_OFF
jal schedule
local_irq_disable # make sure need_resched and
@@ -173,6 +175,7 @@ syscall_exit_work:
beqz t0, work_pending # trace bit set?
local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
+ TRACE_IRQS_ON
move a0, sp
jal syscall_trace_leave
b resume_userspace
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289977cc..0b3e58a3189f 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -625,7 +624,6 @@ static int __init cps_gen_core_entries(unsigned cpu)
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
- unsigned dlinesz = cpu_data[cpu].dcache.linesz;
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -645,16 +643,11 @@ static int __init cps_gen_core_entries(unsigned cpu)
}
if (!per_cpu(ready_count, core)) {
- core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+ core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
- per_cpu(ready_count_alloc, core) = core_rc;
-
- /* Ensure ready_count is aligned to a cacheline boundary */
- core_rc += dlinesz - 1;
- core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
per_cpu(ready_count, core) = core_rc;
}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 99a402231f4d..31ca2edd7218 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -194,6 +194,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
+
+ regs.cp0_status = KSU_KERNEL;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index dfb04fcedb04..48d6349fd9d7 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -107,31 +107,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
};
static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
- FUNC("sdcx", 3, 19, 1),
+ FUNC("sdxc d6", 3, 19, 1),
FUNC("utif", 2, 19, 1),
FUNC("gpio", 1, 19, 1),
- FUNC("pwm", 0, 19, 1),
+ FUNC("pwm1", 0, 19, 1),
};
static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
- FUNC("sdcx", 3, 18, 1),
+ FUNC("sdxc d7", 3, 18, 1),
FUNC("utif", 2, 18, 1),
FUNC("gpio", 1, 18, 1),
- FUNC("pwm", 0, 18, 1),
+ FUNC("pwm0", 0, 18, 1),
};
static struct rt2880_pmx_func uart2_grp_mt7628[] = {
- FUNC("sdcx", 3, 20, 2),
+ FUNC("sdxc d5 d4", 3, 20, 2),
FUNC("pwm", 2, 20, 2),
FUNC("gpio", 1, 20, 2),
- FUNC("uart", 0, 20, 2),
+ FUNC("uart2", 0, 20, 2),
};
static struct rt2880_pmx_func uart1_grp_mt7628[] = {
- FUNC("sdcx", 3, 45, 2),
+ FUNC("sw_r", 3, 45, 2),
FUNC("pwm", 2, 45, 2),
FUNC("gpio", 1, 45, 2),
- FUNC("uart", 0, 45, 2),
+ FUNC("uart1", 0, 45, 2),
};
static struct rt2880_pmx_func i2c_grp_mt7628[] = {
@@ -143,21 +143,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
+static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
FUNC("jtag", 3, 22, 8),
FUNC("utif", 2, 22, 8),
FUNC("gpio", 1, 22, 8),
- FUNC("sdcx", 0, 22, 8),
+ FUNC("sdxc", 0, 22, 8),
};
static struct rt2880_pmx_func uart0_grp_mt7628[] = {
FUNC("-", 3, 12, 2),
FUNC("-", 2, 12, 2),
FUNC("gpio", 1, 12, 2),
- FUNC("uart", 0, 12, 2),
+ FUNC("uart0", 0, 12, 2),
};
static struct rt2880_pmx_func i2s_grp_mt7628[] = {
@@ -171,7 +171,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
FUNC("-", 3, 6, 1),
FUNC("refclk", 2, 6, 1),
FUNC("gpio", 1, 6, 1),
- FUNC("spi", 0, 6, 1),
+ FUNC("spi cs1", 0, 6, 1),
};
static struct rt2880_pmx_func spis_grp_mt7628[] = {
@@ -188,28 +188,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
FUNC("gpio", 0, 11, 1),
};
-#define MT7628_GPIO_MODE_MASK 0x3
-
-#define MT7628_GPIO_MODE_PWM1 30
-#define MT7628_GPIO_MODE_PWM0 28
-#define MT7628_GPIO_MODE_UART2 26
-#define MT7628_GPIO_MODE_UART1 24
-#define MT7628_GPIO_MODE_I2C 20
-#define MT7628_GPIO_MODE_REFCLK 18
-#define MT7628_GPIO_MODE_PERST 16
-#define MT7628_GPIO_MODE_WDT 14
-#define MT7628_GPIO_MODE_SPI 12
-#define MT7628_GPIO_MODE_SDMODE 10
-#define MT7628_GPIO_MODE_UART0 8
-#define MT7628_GPIO_MODE_I2S 6
-#define MT7628_GPIO_MODE_CS1 4
-#define MT7628_GPIO_MODE_SPIS 2
-#define MT7628_GPIO_MODE_GPIO 0
+static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
+ FUNC("rsvd", 3, 35, 1),
+ FUNC("rsvd", 2, 35, 1),
+ FUNC("gpio", 1, 35, 1),
+ FUNC("wled_kn", 0, 35, 1),
+};
+
+static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
+ FUNC("rsvd", 3, 44, 1),
+ FUNC("rsvd", 2, 44, 1),
+ FUNC("gpio", 1, 44, 1),
+ FUNC("wled_an", 0, 44, 1),
+};
+
+#define MT7628_GPIO_MODE_MASK 0x3
+
+#define MT7628_GPIO_MODE_WLED_KN 48
+#define MT7628_GPIO_MODE_WLED_AN 32
+#define MT7628_GPIO_MODE_PWM1 30
+#define MT7628_GPIO_MODE_PWM0 28
+#define MT7628_GPIO_MODE_UART2 26
+#define MT7628_GPIO_MODE_UART1 24
+#define MT7628_GPIO_MODE_I2C 20
+#define MT7628_GPIO_MODE_REFCLK 18
+#define MT7628_GPIO_MODE_PERST 16
+#define MT7628_GPIO_MODE_WDT 14
+#define MT7628_GPIO_MODE_SPI 12
+#define MT7628_GPIO_MODE_SDMODE 10
+#define MT7628_GPIO_MODE_UART0 8
+#define MT7628_GPIO_MODE_I2S 6
+#define MT7628_GPIO_MODE_CS1 4
+#define MT7628_GPIO_MODE_SPIS 2
+#define MT7628_GPIO_MODE_GPIO 0
static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
- GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM1),
- GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM0),
GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_UART2),
@@ -233,6 +249,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
1, MT7628_GPIO_MODE_SPIS),
GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_GPIO),
+ GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_WLED_AN),
+ GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_WLED_KN),
{ 0 }
};
@@ -439,7 +459,7 @@ void __init ralink_clk_init(void)
ralink_clk_add("10000c00.uartlite", periph_rate);
ralink_clk_add("10180000.wmac", xtal_rate);
- if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) {
+ if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) {
/*
* When the CPU goes into sleep mode, the BUS clock will be
* too low for USB to function properly. Adjust the busses
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 15506a1ff22a..9dd67749c592 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -109,5 +109,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
rt2880_pinmux_data = rt2880_pinmux_data_act;
- ralink_soc == RT2880_SOC;
+ ralink_soc = RT2880_SOC;
}
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 98949b0df00a..6696c1986844 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -304,9 +304,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
* 0xFF's is always returned from PCI config space.
+ *
+ * When the @severity is EEH_LOG_PERM, the PE is going to be
+ * removed. Prior to that, the drivers for devices included in
+ * the PE will be closed. The drivers rely on a working IO path
+ * to bring the devices to a quiet state. Otherwise, PCI traffic
+ * from those devices after they are removed is likely to cause
+ * another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+ severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f281406..1138fec3dd65 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
#endif
#endif
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
+
return 1;
}
@@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3c3a367b6e59..396dc44e783b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2693,6 +2693,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
return -EINVAL;
}
+ /*
+ * Don't allow entry with a suspended transaction, because
+ * the guest entry/exit code will lose it.
+ * If the guest has TM enabled, save away their TM-related SPRs
+ * (they will get restored by the TM unavailable interrupt).
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 4c48b487698c..0b48ce40d351 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -179,6 +179,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
8: /* invalid EA */
+ /*
+ * It's possible the bad EA is too large to fit in the SLB cache, which
+ * would mean we'd fail to invalidate it on context switch. So mark the
+ * SLB cache as full so we force a full flush. We also set cr7+eq to
+ * mark the address as a kernel address, so slb_finish_load() skips
+ * trying to insert it into the SLB cache.
+ */
+ li r9,SLB_CACHE_ENTRIES + 1
+ sth r9,PACASLBCACHEPTR(r13)
+ crset 4*cr7+eq
li r10,0 /* BAD_VSID */
li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
}
#define __ctl_store(array, low, high) { \
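
The added "memory" clobber tells the compiler the asm may read or write memory it cannot see, so it must not cache values in registers or reorder loads and stores across the statement. A minimal GNU C sketch of the effect (not the kernel macro itself):

#include <stdio.h>

static int shared_flag;

static void publish(int v)
{
	shared_flag = v;
	/* Without the "memory" clobber the compiler could sink or cache
	 * the store above across the (empty) asm statement. */
	asm volatile("" ::: "memory");
}

int main(void)
{
	publish(1);
	printf("flag = %d\n", shared_flag);
	return 0;
}
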
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index e9cd7befcb76..19d14ac23ef9 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -221,6 +221,9 @@ struct x86_emulate_ops {
void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+
+ unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
+ void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@ struct x86_emulate_ctxt {
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
- int emul_flags;
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1dcea225977d..04b2f3cad7ba 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2531,7 +2531,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
u64 smbase;
int ret;
- if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
@@ -2580,11 +2580,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
return X86EMUL_UNHANDLEABLE;
}
- if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
- ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
- ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
+ ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+ ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
return X86EMUL_CONTINUE;
}
@@ -5296,6 +5296,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
+ unsigned emul_flags;
ctxt->mem_read.pos = 0;
@@ -5310,6 +5311,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
+ emul_flags = ctxt->ops->get_hflags(ctxt);
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
@@ -5343,7 +5345,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5372,7 +5374,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5426,7 +5428,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
special_insn:
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index ab38af4f4947..23a7c7ba377a 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
((u64)1 << edx.split.bit_width_fixed) - 1;
}
- pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+ pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
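
The 1 -> 1ull change matters because the left operand's type decides the width of the shift: with an int literal, a counter count of 32 or more shifts past the width of int. A small stand-alone illustration with a hypothetical counter count:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned n = 36;                 /* hypothetical counter count >= 32 */

	/* (1 << n) - 1 would shift a 32-bit int by at least its width, which
	 * is undefined. Promoting the literal to unsigned long long keeps the
	 * shift valid for n up to 63 and yields the intended contiguous mask. */
	uint64_t mask = (1ull << n) - 1;

	printf("mask = %#llx\n", (unsigned long long)mask);
	return 0;
}
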
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 50ca8f409a7c..bbaa11f4e74b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2264,7 +2264,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c82792487e9..8e526c6fd784 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4844,6 +4844,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
+ if (base3)
+ *base3 = 0;
return false;
}
@@ -4999,6 +5001,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}
+static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+{
+ return emul_to_vcpu(ctxt)->arch.hflags;
+}
+
+static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+{
+ kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
@@ -5038,6 +5050,8 @@ static const struct x86_emulate_ops emulate_ops = {
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
.set_nmi_mask = emulator_set_nmi_mask,
+ .get_hflags = emulator_get_hflags,
+ .set_hflags = emulator_set_hflags,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -5090,7 +5104,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
- ctxt->emul_flags = vcpu->arch.hflags;
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
@@ -5486,8 +5499,6 @@ restart:
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- if (vcpu->arch.hflags != ctxt->emul_flags)
- kvm_set_hflags(vcpu, ctxt->emul_flags);
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
@@ -5974,7 +5985,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3,
+ &ctxt->exception);
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
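
The emulate.c and x86.c hunks above replace the cached ctxt->emul_flags snapshot with get_hflags/set_hflags callbacks into the host, so the emulator always sees the vcpu's live hflags. A minimal sketch of that callback shape, with illustrative names only (not the KVM API):

#include <stdio.h>

struct emu_ops {
	unsigned (*get_hflags)(void *host);
	void (*set_hflags)(void *host, unsigned hflags);
};

struct host_vcpu {
	unsigned hflags;
};

static unsigned host_get_hflags(void *h)
{
	return ((struct host_vcpu *)h)->hflags;
}

static void host_set_hflags(void *h, unsigned hflags)
{
	((struct host_vcpu *)h)->hflags = hflags;
}

int main(void)
{
	struct host_vcpu vcpu = { .hflags = 0x4 };  /* e.g. an SMM-like flag */
	struct emu_ops ops = { host_get_hflags, host_set_hflags };

	/* The emulator reads the live value through the callback ... */
	unsigned flags = ops.get_hflags(&vcpu);

	/* ... and writes changes straight back, so nothing can go stale. */
	ops.set_hflags(&vcpu, flags & ~0x4u);

	printf("hflags = %#x\n", vcpu.hflags);
	return 0;
}
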
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index ef05755a1900..7ed47b1e6f42 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
- if (info->si_addr == (void *)-1) {
+ if (info->si_addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
@@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void)
if (!kernel_managing_mpx_tables(current->mm))
return -EINVAL;
- if (do_mpx_bt_fault()) {
- force_sig(SIGSEGV, current);
- /*
- * The force_sig() is essentially "handling" this
- * exception, so we do not pass up the error
- * from do_mpx_bt_fault().
- */
- }
- return 0;
+ return do_mpx_bt_fault();
}
/*
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5fb6adaaa796..5a760fd66bec 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -134,8 +134,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
{
struct flush_tlb_info info;
- if (end == 0)
- end = start + PAGE_SIZE;
info.flush_mm = mm;
info.flush_start = start;
info.flush_end = end;
@@ -264,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
+ flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
preempt_enable();
}
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 0c89ab992012..781fa96726e2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -212,6 +212,7 @@ struct fastrpc_channel_ctx {
struct device *dev;
struct fastrpc_session_ctx session[NUM_SESSIONS];
struct completion work;
+ struct completion workport;
struct notifier_block nb;
struct kref kref;
int channel;
@@ -290,6 +291,7 @@ struct fastrpc_file {
int cid;
int ssrcount;
int pd;
+ int file_close;
struct fastrpc_apps *apps;
struct fastrpc_perf perf;
struct dentry *debugfs_file;
@@ -699,7 +701,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
if (vmid) {
int srcVM[1] = {VMID_HLOS};
int destVM[2] = {VMID_HLOS, vmid};
- int destVMperm[2] = {PERM_READ | PERM_WRITE,
+ int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC,
PERM_READ | PERM_WRITE | PERM_EXEC};
VERIFY(err, !hyp_assign_phys(map->phys,
@@ -771,7 +773,7 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
if (vmid) {
int srcVM[1] = {VMID_HLOS};
int destVM[2] = {VMID_HLOS, vmid};
- int destVMperm[2] = {PERM_READ | PERM_WRITE,
+ int destVMperm[2] = {PERM_READ | PERM_WRITE | PERM_EXEC,
PERM_READ | PERM_WRITE | PERM_EXEC};
VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
@@ -1454,7 +1456,7 @@ static void smd_event_handler(void *priv, unsigned event)
switch (event) {
case SMD_EVENT_OPEN:
- complete(&me->channel[cid].work);
+ complete(&me->channel[cid].workport);
break;
case SMD_EVENT_CLOSE:
fastrpc_notify_drivers(me, cid);
@@ -1475,6 +1477,7 @@ static void fastrpc_init(struct fastrpc_apps *me)
me->channel = &gcinfo[0];
for (i = 0; i < NUM_CHANNELS; i++) {
init_completion(&me->channel[i].work);
+ init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
}
}
@@ -2136,7 +2139,7 @@ void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event)
switch (event) {
case GLINK_CONNECTED:
link->port_state = FASTRPC_LINK_CONNECTED;
- complete(&me->channel[cid].work);
+ complete(&me->channel[cid].workport);
break;
case GLINK_LOCAL_DISCONNECTED:
link->port_state = FASTRPC_LINK_DISCONNECTED;
@@ -2194,6 +2197,9 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
return 0;
}
(void)fastrpc_release_current_dsp_process(fl);
+ spin_lock(&fl->hlock);
+ fl->file_close = 1;
+ spin_unlock(&fl->hlock);
fastrpc_context_list_dtor(fl);
fastrpc_buf_list_free(fl);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
@@ -2286,8 +2292,7 @@ static void fastrpc_glink_close(void *chan, int cid)
return;
link = &gfa.channel[cid].link;
- if (link->port_state == FASTRPC_LINK_CONNECTED ||
- link->port_state == FASTRPC_LINK_CONNECTING) {
+ if (link->port_state == FASTRPC_LINK_CONNECTED) {
link->port_state = FASTRPC_LINK_DISCONNECTING;
glink_close(chan);
}
@@ -2485,8 +2490,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
if (err)
goto bail;
- VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
- RPC_TIMEOUT));
+ VERIFY(err,
+ wait_for_completion_timeout(&me->channel[cid].workport,
+ RPC_TIMEOUT));
if (err) {
me->channel[cid].chan = 0;
goto bail;
@@ -2582,6 +2588,14 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
p.inv.fds = 0;
p.inv.attrs = 0;
+ spin_lock(&fl->hlock);
+ if (fl->file_close == 1) {
+ err = EBADF;
+ pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
+ spin_unlock(&fl->hlock);
+ goto bail;
+ }
+ spin_unlock(&fl->hlock);
switch (ioctl_num) {
case FASTRPC_IOCTL_INVOKE:
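
The file_close flag is set under fl->hlock in the release path and re-checked under the same lock at the top of the ioctl path, so no new DSP request can start once teardown has begun. A minimal userspace sketch of the pattern, with a pthread mutex standing in for the spinlock and hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hlock = PTHREAD_MUTEX_INITIALIZER; /* stands in for the spinlock */
static int file_close;                                    /* set once release has started */

static int submit_request(void)
{
	int closing;

	pthread_mutex_lock(&hlock);
	closing = file_close;     /* checked under the same lock that sets it */
	pthread_mutex_unlock(&hlock);

	if (closing) {
		fprintf(stderr, "device is closing, rejecting request\n");
		return -1;        /* analogous to bailing out with EBADF */
	}
	/* ... hand the request to the remote processor here ... */
	return 0;
}

static void release_device(void)
{
	pthread_mutex_lock(&hlock);
	file_close = 1;           /* no new requests may start after this */
	pthread_mutex_unlock(&hlock);
	/* ... tear down contexts and buffers ... */
}

int main(void)
{
	submit_request();
	release_device();
	submit_request();
	return 0;
}
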
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 20e617ed0770..e206d9db4d7d 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -320,6 +320,7 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
struct diag_ctrl_msg_mask header;
+ uint8_t msg_mask_tbl_count_local;
if (peripheral >= NUM_PERIPHERALS)
return;
@@ -360,6 +361,8 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
return;
}
buf = mask_info->update_buf;
+ msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
+ mutex_unlock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
switch (mask_info->status) {
case DIAG_CTRL_MASK_ALL_DISABLED:
@@ -376,9 +379,11 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
goto err;
}
- for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
- if (((first < mask->ssid_first) ||
- (last > mask->ssid_last_tools)) && first != ALL_SSID) {
+ for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+ mutex_lock(&driver->msg_mask_lock);
+ if (((mask->ssid_first > first) ||
+ (mask->ssid_last_tools < last)) && first != ALL_SSID) {
+ mutex_unlock(&driver->msg_mask_lock);
continue;
}
@@ -419,19 +424,19 @@ proceed:
if (mask_size > 0)
memcpy(buf + header_len, mask->ptr, mask_size);
mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
err = diagfwd_write(peripheral, TYPE_CNTL, buf,
header_len + mask_size);
if (err && err != -ENODEV)
- pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n",
- peripheral);
+ pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n",
+ peripheral, err);
if (first != ALL_SSID)
break;
}
err:
mutex_unlock(&mask_info->lock);
- mutex_unlock(&driver->msg_mask_lock);
}
static void diag_send_time_sync_update(uint8_t peripheral)
@@ -711,8 +716,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
}
req = (struct diag_msg_build_mask_t *)src_buf;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
@@ -752,6 +757,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
__func__, mask_size);
mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
return -ENOMEM;
}
mask->ptr = temp;
@@ -770,8 +777,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
mask_info->status = DIAG_CTRL_MASK_VALID;
break;
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -796,7 +803,9 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -824,9 +833,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
}
req = (struct diag_msg_config_rsp_t *)src_buf;
+
+ mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
+
mask = (struct diag_msg_mask_t *)mask_info->ptr;
- mutex_lock(&mask_info->lock);
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -835,8 +846,8 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mask->range * sizeof(uint32_t));
mutex_unlock(&mask->lock);
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -856,7 +867,9 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -950,7 +963,9 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -997,7 +1012,9 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
memcpy(dest_buf, &header, sizeof(header));
write_len += sizeof(header);
@@ -1251,7 +1268,9 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, req->equip_id);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -1302,7 +1321,9 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -1340,8 +1361,8 @@ static int diag_create_msg_mask_table(void)
struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
struct diag_ssid_range_t range;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&msg_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
range.ssid_first = msg_mask_tbl[i].ssid_first;
@@ -1350,8 +1371,8 @@ static int diag_create_msg_mask_table(void)
if (err)
break;
}
- mutex_unlock(&msg_mask.lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&msg_mask.lock);
return err;
}
@@ -1364,8 +1385,8 @@ static int diag_create_build_time_mask(void)
struct diag_msg_mask_t *build_mask = NULL;
struct diag_ssid_range_t range;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&msg_bt_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
@@ -1478,9 +1499,8 @@ static int diag_create_build_time_mask(void)
}
memcpy(build_mask->ptr, tbl, tbl_size);
}
- mutex_unlock(&msg_bt_mask.lock);
mutex_unlock(&driver->msg_mask_lock);
-
+ mutex_unlock(&msg_bt_mask.lock);
return err;
}
@@ -1650,8 +1670,8 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&dest->lock);
+ mutex_lock(&driver->msg_mask_lock);
src_mask = (struct diag_msg_mask_t *)src->ptr;
dest_mask = (struct diag_msg_mask_t *)dest->ptr;
@@ -1668,9 +1688,8 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
src_mask++;
dest_mask++;
}
- mutex_unlock(&dest->lock);
mutex_unlock(&driver->msg_mask_lock);
-
+ mutex_unlock(&dest->lock);
return err;
}
@@ -1681,15 +1700,15 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info)
if (!mask_info)
return;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
__diag_mask_exit(mask_info);
}
@@ -1858,8 +1877,9 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
return -EIO;
}
mutex_unlock(&driver->diag_maskclear_mutex);
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
@@ -1896,8 +1916,8 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
}
total_len += len;
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
return err ? err : total_len;
}
@@ -1966,9 +1986,11 @@ void diag_send_updates_peripheral(uint8_t peripheral)
diag_send_feature_mask_update(peripheral);
if (driver->time_sync_enabled)
diag_send_time_sync_update(peripheral);
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
diag_send_event_mask_update(peripheral);
+ mutex_unlock(&driver->md_session_lock);
diag_send_real_time_update(peripheral,
driver->real_time_mode[DIAG_LOCAL_PROC]);
diag_send_peripheral_buffering_mode(
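The diag_masks.c hunks above settle on one consistent lock order: the per-table lock (msg_mask.lock, msg_bt_mask.lock or mask_info->lock) is always taken before driver->msg_mask_lock and released in the reverse order, and the peripheral mask updates are additionally wrapped in md_session_lock. As a rough user-space sketch of why a single acquisition order matters, with illustrative mutex names that are stand-ins for the driver's locks:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for mask_info->lock and driver->msg_mask_lock. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t msg_mask_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Every caller takes table_lock first, then msg_mask_lock, and releases
 * them in the reverse order. Because no thread ever holds msg_mask_lock
 * while waiting for table_lock, the classic AB/BA deadlock cannot occur.
 */
static void update_mask_entry(int id)
{
	pthread_mutex_lock(&table_lock);
	pthread_mutex_lock(&msg_mask_lock);
	printf("updating mask table entry %d\n", id);
	pthread_mutex_unlock(&msg_mask_lock);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	update_mask_entry(0);
	return 0;
}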
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 06b83f5230bf..a27f12883c8d 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -129,37 +129,6 @@ void diag_md_close_all()
diag_ws_reset(DIAG_WS_MUX);
}
-static int diag_md_get_peripheral(int ctxt)
-{
- int peripheral;
-
- if (driver->num_pd_session) {
- peripheral = GET_PD_CTXT(ctxt);
- switch (peripheral) {
- case UPD_WLAN:
- case UPD_AUDIO:
- case UPD_SENSORS:
- break;
- case DIAG_ID_MPSS:
- case DIAG_ID_LPASS:
- case DIAG_ID_CDSP:
- default:
- peripheral =
- GET_BUF_PERIPHERAL(ctxt);
- if (peripheral > NUM_PERIPHERALS)
- peripheral = -EINVAL;
- break;
- }
- } else {
- /* Account for Apps data as well */
- peripheral = GET_BUF_PERIPHERAL(ctxt);
- if (peripheral > NUM_PERIPHERALS)
- peripheral = -EINVAL;
- }
-
- return peripheral;
-}
-
int diag_md_write(int id, unsigned char *buf, int len, int ctx)
{
int i;
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index d6f6ea7af8ea..8cc803eef552 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -153,12 +153,15 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
upd = PERIPHERAL_CDSP;
break;
case UPD_WLAN:
- if (!driver->num_pd_session)
+ if (!driver->pd_logging_mode[0])
upd = PERIPHERAL_MODEM;
break;
case UPD_AUDIO:
+ if (!driver->pd_logging_mode[1])
+ upd = PERIPHERAL_LPASS;
+ break;
case UPD_SENSORS:
- if (!driver->num_pd_session)
+ if (!driver->pd_logging_mode[2])
upd = PERIPHERAL_LPASS;
break;
default:
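The diag_mux.c change above replaces the single num_pd_session check with per-PD flags, so traffic for a user PD is attributed to its parent peripheral only when that particular PD has no logging session of its own (modem for WLAN, LPASS for audio and sensors). A small user-space sketch of that routing rule; the names and array indices below are illustrative, not the driver's definitions:

#include <stdio.h>
#include <string.h>

/* Illustrative only: [0]=wlan, [1]=audio, [2]=sensors. */
static int pd_logging_mode[3];

/*
 * Mirrors the switch in diag_mux_write(): if no independent PD session is
 * enabled for this user PD, its traffic falls back to the parent peripheral.
 */
static const char *route_upd(const char *upd)
{
	if (!strcmp(upd, "wlan"))
		return pd_logging_mode[0] ? "upd_wlan" : "modem";
	if (!strcmp(upd, "audio"))
		return pd_logging_mode[1] ? "upd_audio" : "lpass";
	if (!strcmp(upd, "sensors"))
		return pd_logging_mode[2] ? "upd_sensors" : "lpass";
	return "unknown";
}

int main(void)
{
	pd_logging_mode[1] = 1;	/* only the audio PD has its own session */
	printf("wlan -> %s, audio -> %s, sensors -> %s\n",
	       route_upd("wlan"), route_upd("audio"), route_upd("sensors"));
	return 0;
}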
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index cc56d68217b3..d81a39e2c637 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -642,6 +642,7 @@ struct diagchar_dev {
#endif
int time_sync_enabled;
uint8_t uses_time_api;
+ struct platform_device *pdev;
};
extern struct diagchar_dev *driver;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 8ab22ee3bc3c..eaed3b101095 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -22,6 +22,8 @@
#include <linux/sched.h>
#include <linux/ratelimit.h>
#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/msm_mhi.h>
#ifdef CONFIG_DIAG_OVER_USB
#include <linux/usb/usbdiag.h>
#endif
@@ -456,6 +458,7 @@ static void diag_close_logging_process(const int pid)
{
int i, j;
int session_mask;
+ uint32_t p_mask;
struct diag_md_session_t *session_info = NULL;
struct diag_logging_mode_param_t params;
@@ -475,6 +478,9 @@ static void diag_close_logging_process(const int pid)
session_mask = session_info->peripheral_mask;
diag_md_session_close(session_info);
+ p_mask =
+ diag_translate_kernel_to_user_mask(session_mask);
+
for (i = 0; i < NUM_MD_SESSIONS; i++)
if (MD_PERIPHERAL_MASK(i) & session_mask)
diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
@@ -482,19 +488,17 @@ static void diag_close_logging_process(const int pid)
params.req_mode = USB_MODE;
params.mode_param = 0;
params.pd_mask = 0;
- params.peripheral_mask =
- diag_translate_kernel_to_user_mask(session_mask);
+ params.peripheral_mask = p_mask;
if (driver->num_pd_session > 0) {
- for (i = UPD_WLAN; ((i < NUM_MD_SESSIONS) &&
- (session_mask & MD_PERIPHERAL_MASK(i)));
- i++) {
- j = i - UPD_WLAN;
- driver->pd_session_clear[j] = 1;
- driver->pd_logging_mode[j] = 0;
- driver->num_pd_session -= 1;
- params.pd_mask =
- diag_translate_kernel_to_user_mask(session_mask);
+ for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
+ if (session_mask & MD_PERIPHERAL_MASK(i)) {
+ j = i - UPD_WLAN;
+ driver->pd_session_clear[j] = 1;
+ driver->pd_logging_mode[j] = 0;
+ driver->num_pd_session -= 1;
+ params.pd_mask = p_mask;
+ }
}
}
@@ -699,6 +703,11 @@ static void diag_cmd_invalidate_polling(int change_flag)
driver->polling_reg_flag = 0;
list_for_each_safe(start, temp, &driver->cmd_reg_list) {
item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (&item->entry == NULL) {
+ pr_err("diag: In %s, unable to search command\n",
+ __func__);
+ return;
+ }
polling = diag_cmd_chk_polling(&item->entry);
if (polling == DIAG_CMD_POLLING) {
driver->polling_reg_flag = 1;
@@ -840,6 +849,12 @@ void diag_cmd_remove_reg_by_pid(int pid)
mutex_lock(&driver->cmd_reg_mutex);
list_for_each_safe(start, temp, &driver->cmd_reg_list) {
item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (&item->entry == NULL) {
+ pr_err("diag: In %s, unable to search command\n",
+ __func__);
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return;
+ }
if (item->pid == pid) {
list_del(&item->link);
kfree(item);
@@ -858,6 +873,12 @@ void diag_cmd_remove_reg_by_proc(int proc)
mutex_lock(&driver->cmd_reg_mutex);
list_for_each_safe(start, temp, &driver->cmd_reg_list) {
item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (&item->entry == NULL) {
+ pr_err("diag: In %s, unable to search command\n",
+ __func__);
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return;
+ }
if (item->proc == proc) {
list_del(&item->link);
kfree(item);
@@ -1020,6 +1041,11 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
else
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled) {
+ if (len < 4) {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
payload = *(uint16_t *)(buf + 2);
if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
pr_err("diag: Dropping packet, payload size is %d\n",
@@ -1028,11 +1054,21 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
}
driver->hdlc_encode_buf_len = payload;
/*
- * Adding 4 bytes for start (1 byte), version (1 byte) and
- * payload (2 bytes)
+ * Adding 5 bytes for start (1 byte), version (1 byte),
+ * payload (2 bytes) and end (1 byte)
*/
- memcpy(driver->hdlc_encode_buf, buf + 4, payload);
- goto send_data;
+ if (len == (payload + 5)) {
+ /*
+ * Adding 4 bytes for start (1 byte), version (1 byte)
+ * and payload (2 bytes)
+ */
+ memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+ goto send_data;
+ } else {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
}
if (hdlc_flag) {
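The added length checks above reject non-HDLC packets whose total size does not match the framing: one start byte, one version byte, a two-byte payload length, the payload itself, and one end byte, i.e. len must equal payload + 5. A minimal user-space sketch of that validation; the byte values and the MAX_PAYLOAD limit are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PAYLOAD 8192	/* stand-in for DIAG_MAX_HDLC_BUF_SIZE */

/* Returns the payload length on success, -1 if the frame is malformed. */
static int check_non_hdlc_frame(const uint8_t *buf, int len)
{
	uint16_t payload;

	if (len < 4)			/* too short to hold the header */
		return -1;
	/* The driver reads this field with a direct 16-bit load; a
	 * little-endian host is assumed here. */
	memcpy(&payload, buf + 2, sizeof(payload));
	if (payload > MAX_PAYLOAD)
		return -1;
	if (len != payload + 5)		/* start + version + len(2) + end */
		return -1;
	return payload;
}

int main(void)
{
	uint8_t frame[] = { 0x7e, 0x01, 0x03, 0x00, 'a', 'b', 'c', 0x7e };

	printf("payload bytes: %d\n",
	       check_non_hdlc_frame(frame, sizeof(frame)));
	return 0;
}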
@@ -1819,14 +1855,18 @@ static int diag_ioctl_lsm_deinit(void)
{
int i;
+ mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == current->tgid)
break;
- if (i == driver->num_clients)
+ if (i == driver->num_clients) {
+ mutex_unlock(&driver->diagchar_mutex);
return -EINVAL;
+ }
driver->data_ready[i] |= DEINIT_TYPE;
+ mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
return 1;
@@ -3568,6 +3608,41 @@ static int diagchar_cleanup(void)
return 0;
}
+static int diag_mhi_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
+ return -EPROBE_DEFER;
+ driver->pdev = pdev;
+ ret = diag_remote_init();
+ if (ret) {
+ diag_remote_exit();
+ return ret;
+ }
+ ret = diagfwd_bridge_init();
+ if (ret) {
+ diagfwd_bridge_exit();
+ return ret;
+ }
+ pr_debug("diag: mhi device is ready\n");
+ return 0;
+}
+
+static const struct of_device_id diag_mhi_table[] = {
+ {.compatible = "qcom,diag-mhi"},
+ {},
+};
+
+static struct platform_driver diag_mhi_driver = {
+ .probe = diag_mhi_probe,
+ .driver = {
+ .name = "DIAG MHI Platform",
+ .owner = THIS_MODULE,
+ .of_match_table = diag_mhi_table,
+ },
+};
+
static int __init diagchar_init(void)
{
dev_t dev;
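diag_mhi_probe() above moves diag_remote_init() and diagfwd_bridge_init() out of diagchar_init() and into a platform driver probe that returns -EPROBE_DEFER until the MHI device is ready, letting the driver core retry the probe later. A self-contained sketch of that deferral pattern; the compatible string, property name and driver name below are hypothetical, not part of the diag driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static bool dependency_ready(struct device *dev)
{
	/* Stand-in for a readiness check such as mhi_is_device_ready(). */
	return device_property_present(dev, "example,ready");
}

static int example_probe(struct platform_device *pdev)
{
	if (!dependency_ready(&pdev->dev))
		return -EPROBE_DEFER;	/* driver core retries probe later */
	dev_info(&pdev->dev, "dependency ready, probing\n");
	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "example,deferring-dev" },
	{ },
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-deferring-dev",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");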
@@ -3657,9 +3732,6 @@ static int __init diagchar_init(void)
ret = diag_masks_init();
if (ret)
goto fail;
- ret = diag_remote_init();
- if (ret)
- goto fail;
ret = diag_mux_init();
if (ret)
goto fail;
@@ -3696,9 +3768,7 @@ static int __init diagchar_init(void)
goto fail;
pr_debug("diagchar initialized now");
- ret = diagfwd_bridge_init();
- if (ret)
- diagfwd_bridge_exit();
+ platform_driver_register(&diag_mhi_driver);
return 0;
fail:
@@ -3712,9 +3782,7 @@ fail:
diagfwd_cntl_exit();
diag_dci_exit();
diag_masks_exit();
- diag_remote_exit();
return -1;
-
}
static void diagchar_exit(void)
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 4ae2158b5a6b..74777212e4cf 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -67,7 +67,6 @@ void diag_cntl_channel_close(struct diagfwd_info *p_info)
driver->feature[peripheral].sent_feature_mask = 0;
driver->feature[peripheral].rcvd_feature_mask = 0;
- flush_workqueue(driver->cntl_wq);
reg_dirty |= PERIPHERAL_MASK(peripheral);
diag_cmd_remove_reg_by_proc(peripheral);
driver->feature[peripheral].stm_support = DISABLE_STM;
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index 03d496c2dd91..f1f8f0b2b34b 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -361,13 +361,44 @@ static void diag_glink_read_work_fn(struct work_struct *work)
diagfwd_channel_read(glink_info->fwd_ctxt);
}
+struct diag_glink_read_work {
+ struct diag_glink_info *glink_info;
+ const void *ptr_read_done;
+ const void *ptr_rx_done;
+ size_t ptr_read_size;
+ struct work_struct work;
+};
+
+static void diag_glink_notify_rx_work_fn(struct work_struct *work)
+{
+ struct diag_glink_read_work *read_work = container_of(work,
+ struct diag_glink_read_work, work);
+ struct diag_glink_info *glink_info = read_work->glink_info;
+
+ if (!glink_info || !glink_info->hdl) {
+ kfree(read_work);
+ return;
+ }
+
+ diagfwd_channel_read_done(glink_info->fwd_ctxt,
+ (unsigned char *)(read_work->ptr_read_done),
+ read_work->ptr_read_size);
+
+ glink_rx_done(glink_info->hdl, read_work->ptr_rx_done, false);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
+ read_work->ptr_rx_done, (int)read_work->ptr_read_size,
+ glink_info->peripheral, glink_info->type);
+ kfree(read_work);
+}
static void diag_glink_notify_rx(void *hdl, const void *priv,
const void *pkt_priv, const void *ptr,
size_t size)
{
struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
- int err = 0;
+ struct diag_glink_read_work *read_work;
if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
return;
@@ -379,12 +410,25 @@ static void diag_glink_notify_rx(void *hdl, const void *priv,
"diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
+ read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC);
+ if (!read_work) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Could not allocate read_work\n");
+ glink_rx_done(glink_info->hdl, ptr, true);
+ return;
+ }
+
memcpy((void *)pkt_priv, ptr, size);
- err = diagfwd_channel_read_done(glink_info->fwd_ctxt,
- (unsigned char *)pkt_priv, size);
- glink_rx_done(glink_info->hdl, ptr, false);
+
+ read_work->glink_info = glink_info;
+ read_work->ptr_read_done = pkt_priv;
+ read_work->ptr_rx_done = ptr;
+ read_work->ptr_read_size = size;
+ INIT_WORK(&read_work->work, diag_glink_notify_rx_work_fn);
+ queue_work(glink_info->wq, &read_work->work);
+
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n",
+ "diag: Rx queued for packet %pK of len: %d periph: %d ch: %d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
}
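The rework above stops completing the read and calling glink_rx_done() directly in the notify callback; instead the pointers are captured in a GFP_ATOMIC-allocated work item and processed later on the channel workqueue. A minimal module sketch of that defer-to-workqueue pattern; all names here are hypothetical and unrelated to the diag driver:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct rx_work {
	struct work_struct work;
	void *payload;		/* data copied out of the notify callback */
	size_t len;
};

static struct workqueue_struct *rx_wq;

static void rx_work_fn(struct work_struct *work)
{
	struct rx_work *rw = container_of(work, struct rx_work, work);

	pr_info("processing %zu bytes outside atomic context\n", rw->len);
	kfree(rw->payload);
	kfree(rw);
}

/* Would be called from an atomic notification callback. */
static int defer_rx(const void *ptr, size_t len)
{
	struct rx_work *rw = kmalloc(sizeof(*rw), GFP_ATOMIC);

	if (!rw)
		return -ENOMEM;
	rw->payload = kmemdup(ptr, len, GFP_ATOMIC);
	if (!rw->payload) {
		kfree(rw);
		return -ENOMEM;
	}
	rw->len = len;
	INIT_WORK(&rw->work, rx_work_fn);
	queue_work(rx_wq, &rw->work);
	return 0;
}

static int __init rx_sketch_init(void)
{
	rx_wq = alloc_workqueue("rx_sketch_wq", WQ_UNBOUND, 1);
	if (!rx_wq)
		return -ENOMEM;
	return defer_rx("hello", 5);
}

static void __exit rx_sketch_exit(void)
{
	destroy_workqueue(rx_wq);	/* drains any pending work */
}

module_init(rx_sketch_init);
module_exit(rx_sketch_exit);
MODULE_LICENSE("GPL");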
@@ -473,6 +517,8 @@ static void diag_glink_connect_work_fn(struct work_struct *work)
atomic_set(&glink_info->opened, 1);
diagfwd_channel_open(glink_info->fwd_ctxt);
diagfwd_late_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink channel open: p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
}
static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
@@ -494,9 +540,9 @@ static void diag_glink_late_init_work_fn(struct work_struct *work)
late_init_work);
if (!glink_info || !glink_info->hdl)
return;
- DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d\n",
- glink_info->peripheral, glink_info->type);
diagfwd_channel_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink late init p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
}
static void diag_glink_transport_notify_state(void *handle, const void *priv,
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index 03133a5a89aa..edfba6bb09c9 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -197,7 +197,7 @@ static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
struct diag_mhi_buf_tbl_t *item = NULL;
struct diag_mhi_ch_t *ch = NULL;
- if (!mhi_info || !mhi_info->enabled)
+ if (!mhi_info)
return;
/* Clear all the pending reads */
@@ -678,7 +678,25 @@ static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch)
atomic_set(&(ch->opened), 0);
ctxt = SET_CH_CTXT(id, ch->type);
ch->client_info.mhi_client_cb = mhi_notifier;
- return mhi_register_channel(&ch->hdl, NULL);
+ ch->client_info.chan = ch->chan;
+ ch->client_info.dev = &driver->pdev->dev;
+ ch->client_info.node_name = "qcom,mhi";
+ ch->client_info.user_data = (void *)(uintptr_t)ctxt;
+ return mhi_register_channel(&ch->hdl, &ch->client_info);
+}
+
+static void diag_mhi_dev_exit(int dev)
+{
+ struct diag_mhi_info *mhi_info = NULL;
+
+ mhi_info = &diag_mhi[dev];
+ if (!mhi_info)
+ return;
+ if (mhi_info->mhi_wq)
+ destroy_workqueue(mhi_info->mhi_wq);
+ mhi_close(mhi_info->id);
+ if (mhi_info->mempool_init)
+ diagmem_exit(driver, mhi_info->mempool);
}
int diag_mhi_init()
@@ -726,22 +744,16 @@ int diag_mhi_init()
return 0;
fail:
- diag_mhi_exit();
+ diag_mhi_dev_exit(i);
return -ENOMEM;
}
void diag_mhi_exit()
{
int i;
- struct diag_mhi_info *mhi_info = NULL;
for (i = 0; i < NUM_MHI_DEV; i++) {
- mhi_info = &diag_mhi[i];
- if (mhi_info->mhi_wq)
- destroy_workqueue(mhi_info->mhi_wq);
- mhi_close(mhi_info->id);
- if (mhi_info->mempool_init)
- diagmem_exit(driver, mhi_info->mempool);
+ diag_mhi_dev_exit(i);
}
}
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index e209039bed5a..78b8452b19b3 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -216,6 +216,45 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
return buf->len;
}
+int diag_md_get_peripheral(int ctxt)
+{
+ int peripheral;
+
+ if (driver->num_pd_session) {
+ peripheral = GET_PD_CTXT(ctxt);
+ switch (peripheral) {
+ case UPD_WLAN:
+ if (!driver->pd_logging_mode[0])
+ peripheral = PERIPHERAL_MODEM;
+ break;
+ case UPD_AUDIO:
+ if (!driver->pd_logging_mode[1])
+ peripheral = PERIPHERAL_LPASS;
+ break;
+ case UPD_SENSORS:
+ if (!driver->pd_logging_mode[2])
+ peripheral = PERIPHERAL_LPASS;
+ break;
+ case DIAG_ID_MPSS:
+ case DIAG_ID_LPASS:
+ case DIAG_ID_CDSP:
+ default:
+ peripheral =
+ GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ break;
+ }
+ } else {
+ /* Account for Apps data as well */
+ peripheral = GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ }
+
+ return peripheral;
+}
+
static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
struct diagfwd_buf_t *buf, int len)
{
@@ -245,13 +284,15 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
mutex_lock(&driver->hdlc_disable_mutex);
mutex_lock(&fwd_info->data_mutex);
- peripheral = GET_PD_CTXT(buf->ctxt);
- if (peripheral == DIAG_ID_MPSS)
- peripheral = PERIPHERAL_MODEM;
- if (peripheral == DIAG_ID_LPASS)
- peripheral = PERIPHERAL_LPASS;
- if (peripheral == DIAG_ID_CDSP)
- peripheral = PERIPHERAL_CDSP;
+ peripheral = diag_md_get_peripheral(buf->ctxt);
+ if (peripheral < 0) {
+ pr_err("diag:%s:%d invalid peripheral = %d\n",
+ __func__, __LINE__, peripheral);
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_ws_release();
+ return;
+ }
session_info =
diag_md_session_get_peripheral(peripheral);
@@ -1008,7 +1049,16 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
if (!check_channel_state(dest_info->ctxt))
diagfwd_late_open(dest_info);
- diagfwd_cntl_open(dest_info);
+
+ /*
+ * Open control channel to update masks after buffers are
+	 * initialized, for peripherals whose transport is not GLINK.
+	 * For GLINK-supported peripherals, the mask update happens
+	 * after the glink buffers are initialized.
+ */
+
+ if (dest_info->transport != TRANSPORT_GLINK)
+ diagfwd_cntl_open(dest_info);
init_fn(peripheral);
mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
@@ -1199,7 +1249,18 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info)
mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 1;
diagfwd_buffers_init(fwd_info);
- diagfwd_write_buffers_init(fwd_info);
+
+ /*
+ * Initialize buffers for glink supported
+ * peripherals only. Open control channel to update
+ * masks after buffers are initialized.
+ */
+ if (fwd_info->transport == TRANSPORT_GLINK) {
+ diagfwd_write_buffers_init(fwd_info);
+ if (fwd_info->type == TYPE_CNTL)
+ diagfwd_cntl_open(fwd_info);
+ }
+
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
fwd_info->c_ops->open(fwd_info);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
@@ -1224,6 +1285,9 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info)
if (!fwd_info)
return -EIO;
+ if (fwd_info->type == TYPE_CNTL)
+ flush_workqueue(driver->cntl_wq);
+
mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 0;
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index 037eeebdeb35..eda70dcfdcd9 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -105,6 +105,9 @@ void diagfwd_early_open(uint8_t peripheral);
void diagfwd_late_open(struct diagfwd_info *fwd_info);
void diagfwd_close(uint8_t peripheral, uint8_t type);
+
+int diag_md_get_peripheral(int ctxt);
+
int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
void *ctxt, struct diag_peripheral_ops *ops,
struct diagfwd_info **fwd_ctxt);
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
index 0823ed78485e..8161d77ca194 100644
--- a/drivers/char/rdbg.c
+++ b/drivers/char/rdbg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,7 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
-#define SMP2P_NUM_PROCS 8
+#define SMP2P_NUM_PROCS 16
#define MAX_RETRIES 20
#define SM_VERSION 1
@@ -146,9 +146,17 @@ static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/
{0}, /*SMP2P_RESERVED_PROC_1*/
{"rdbg_wcnss", 0, 0}, /*WCNSS*/
- {0}, /*SMP2P_RESERVED_PROC_2*/
- {0}, /*SMP2P_POWER_PROC*/
- {0} /*SMP2P_REMOTE_MOCK_PROC*/
+ {"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/
+ {NULL}, /*SMP2P_POWER_PROC*/
+ {NULL}, /*SMP2P_TZ_PROC*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL} /*SMP2P_REMOTE_MOCK_PROC*/
};
static int smq_blockmap_get(struct smq_block_map *block_map,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 31e8ae916ba0..be0b09a0fb44 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work)
{
struct ports_device *portdev;
- portdev = container_of(work, struct ports_device, control_work);
+ portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;
diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c
index 076ead6aaf34..40d8d12cda82 100644
--- a/drivers/clk/msm/clock-local2.c
+++ b/drivers/clk/msm/clock-local2.c
@@ -1517,8 +1517,8 @@ static int set_rate_pixel(struct clk *clk, unsigned long rate)
{
struct rcg_clk *rcg = to_rcg_clk(clk);
struct clk_freq_tbl *pixel_freq = rcg->current_freq;
- int frac_num[] = {3, 2, 4, 1};
- int frac_den[] = {8, 9, 9, 1};
+ int frac_num[] = {1, 2, 4, 3, 2};
+ int frac_den[] = {1, 3, 9, 8, 9};
int delta = 100000;
int i, rc;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c
index eb69ed35f46d..040707e58e25 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c
@@ -152,8 +152,6 @@ struct dsi_pll_regs {
struct dsi_pll_config {
u32 ref_freq;
- bool div_override;
- u32 output_div;
bool ignore_frac;
bool disable_prescaler;
bool enable_ssc;
@@ -212,7 +210,6 @@ static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
struct dsi_pll_config *config = &pll->pll_configuration;
config->ref_freq = 19200000;
- config->output_div = 1;
config->dec_bits = 8;
config->frac_bits = 18;
config->lock_timer = 64;
@@ -222,7 +219,6 @@ static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
config->thresh_cycles = 32;
config->refclk_cycles = 256;
- config->div_override = false;
config->ignore_frac = false;
config->disable_prescaler = false;
config->enable_ssc = rsc->ssc_en;
@@ -243,54 +239,14 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
{
struct dsi_pll_config *config = &pll->pll_configuration;
struct dsi_pll_regs *regs = &pll->reg_setup;
- u64 target_freq;
u64 fref = rsc->vco_ref_clk_rate;
- u32 computed_output_div, div_log = 0;
u64 pll_freq;
u64 divider;
u64 dec, dec_multiple;
u32 frac;
u64 multiplier;
- u32 i;
-
- target_freq = rsc->vco_current_rate;
- pr_debug("target_freq = %llu\n", target_freq);
-
- if (config->div_override) {
- computed_output_div = config->output_div;
-
- /*
- * Computed_output_div = 2 ^ div_log
- * To get div_log from output div just get the index of the
- * 1 bit in the value.
- * div_log ranges from 0-3. so check the 4 lsbs
- */
-
- for (i = 0; i < 4; i++) {
- if (computed_output_div & (1 << i)) {
- div_log = i;
- break;
- }
- }
-
- } else {
- if (target_freq < MHZ_250) {
- computed_output_div = 8;
- div_log = 3;
- } else if (target_freq < MHZ_500) {
- computed_output_div = 4;
- div_log = 2;
- } else if (target_freq < MHZ_1000) {
- computed_output_div = 2;
- div_log = 1;
- } else {
- computed_output_div = 1;
- div_log = 0;
- }
- }
- pr_debug("computed_output_div = %d\n", computed_output_div);
- pll_freq = target_freq * computed_output_div;
+ pll_freq = rsc->vco_current_rate;
if (config->disable_prescaler)
divider = fref;
@@ -315,7 +271,6 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
else
regs->pll_clock_inverters = 0;
- regs->pll_outdiv_rate = div_log;
regs->pll_lockdet_rate = config->lock_timer;
regs->decimal_div_start = dec;
regs->frac_div_start_low = (frac & 0xff);
@@ -478,7 +433,6 @@ static void dsi_pll_commit(struct dsi_pll_8998 *pll,
MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
reg->frac_div_start_high);
MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@@ -597,11 +551,23 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
{
int rc;
struct mdss_pll_resources *rsc = vco->priv;
+ struct dsi_pll_8998 *pll = rsc->priv;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
dsi_pll_enable_pll_bias(rsc);
if (rsc->slave)
dsi_pll_enable_pll_bias(rsc->slave);
+ /*
+	 * The PLL out dividers are fixed-divider clocks, so set_div is
+	 * not called during the set_rate cycle of the tree. The outdiv
+	 * rate is therefore set in the pll out mux's set_sel callback,
+	 * but that is called only after the VCO's set_rate. Hence the
+	 * PLL out div value is written here before locking the PLL.
+ */
+ MDSS_PLL_REG_W(rsc->pll_base, PLL_PLL_OUTDIV_RATE,
+ regs->pll_outdiv_rate);
+
/* Start PLL */
MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
@@ -728,7 +694,9 @@ static int vco_8998_prepare(struct clk *c)
static unsigned long dsi_pll_get_vco_rate(struct clk *c)
{
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *pll = vco->priv;
+ struct mdss_pll_resources *rsc = vco->priv;
+ struct dsi_pll_8998 *pll = rsc->priv;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
int rc;
u64 ref_clk = vco->ref_clk_rate;
u64 vco_rate;
@@ -738,27 +706,30 @@ static unsigned long dsi_pll_get_vco_rate(struct clk *c)
u32 outdiv;
u64 pll_freq, tmp64;
- rc = mdss_pll_resource_enable(pll, true);
+ rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("failed to enable pll(%d) resource, rc=%d\n",
- pll->index, rc);
+ rsc->index, rc);
return 0;
}
- dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
+ dec = MDSS_PLL_REG_R(rsc->pll_base, PLL_DECIMAL_DIV_START_1);
dec &= 0xFF;
- frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
- frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
+ frac = MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_MID_1) &
0xFF) <<
8);
- frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
+ frac |= ((MDSS_PLL_REG_R(rsc->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
0x3) <<
16);
/* OUTDIV_1:0 field is (log(outdiv, 2)) */
- outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+ outdiv = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE);
outdiv &= 0x3;
+
+ regs->pll_outdiv_rate = outdiv;
+
outdiv = 1 << outdiv;
/*
@@ -776,7 +747,7 @@ static unsigned long dsi_pll_get_vco_rate(struct clk *c)
pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
dec, frac, outdiv, vco_rate);
- (void)mdss_pll_resource_enable(pll, false);
+ (void)mdss_pll_resource_enable(rsc, false);
return (unsigned long)vco_rate;
}
@@ -930,6 +901,26 @@ static int bit_clk_set_div(struct div_clk *clk, int div)
return rc;
}
+static int dsi_pll_out_set_mux_sel(struct mux_clk *clk, int sel)
+{
+ struct mdss_pll_resources *rsc = clk->priv;
+ struct dsi_pll_8998 *pll = rsc->priv;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+
+ regs->pll_outdiv_rate = sel;
+
+ return 0;
+}
+
+static int dsi_pll_out_get_mux_sel(struct mux_clk *clk)
+{
+ struct mdss_pll_resources *rsc = clk->priv;
+ struct dsi_pll_8998 *pll = rsc->priv;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+
+ return regs->pll_outdiv_rate;
+}
+
static int post_vco_clk_get_div(struct div_clk *clk)
{
int rc;
@@ -1125,52 +1116,75 @@ static struct clk_mux_ops mdss_mux_ops = {
.get_mux_sel = mdss_get_mux_sel,
};
+static struct clk_mux_ops mdss_pll_out_mux_ops = {
+ .set_mux_sel = dsi_pll_out_set_mux_sel,
+ .get_mux_sel = dsi_pll_out_get_mux_sel,
+};
+
/*
* Clock tree for generating DSI byte and pixel clocks.
*
- *
- * +---------------+
- * | vco_clk |
- * +-------+-------+
- * |
- * +----------------------+------------------+
- * | | |
- * +-------v-------+ +-------v-------+ +-------v-------+
- * | bitclk_src | | post_vco_div1 | | post_vco_div4 |
- * | DIV(1..15) | +-------+-------+ +-------+-------+
- * +-------+-------+ | |
- * | +------------+ |
- * +--------------------+ | |
- * Shadow Path | | | |
- * + +-------v-------+ +------v------+ +---v-----v------+
- * | | byteclk_src | |post_bit_div | \ post_vco_mux /
- * | | DIV(8) | |DIV(1,2) | \ /
- * | +-------+-------+ +------+------+ +---+------+
- * | | | |
- * | | +------+ +----+
- * | +--------+ | |
- * | | +----v-----v------+
- * +-v---------v----+ \ pclk_src_mux /
- * \ byteclk_mux / \ /
- * \ / +-----+-----+
- * +----+-----+ | Shadow Path
- * | | +
- * v +-----v------+ |
- * dsi_byte_clk | pclk_src | |
- * | DIV(1..15) | |
- * +-----+------+ |
- * | |
- * | |
- * +--------+ |
- * | |
- * +---v----v----+
- * \ pclk_mux /
- * \ /
- * +---+---+
- * |
- * |
- * v
- * dsi_pclk
+ * +---------------+
+ * | vco_clk |
+ * | |
+ * +-------+-------+
+ * |
+ * |
+ * +-------+--------+------------------+-----------------+
+ * | | | |
+ * +------v-------+ +------v-------+ +-------v------+ +------v-------+
+ * | pll_out_div1 | | pll_out_div2 | | pll_out_div4 | | pll_out_div8 |
+ * | DIV(1) | | DIV(2) | | DIV(4) | | DIV(8) |
+ * +------+-------+ +------+-------+ +-------+------+ +------+-------+
+ * | | | |
+ * +------------+ | +--------------+ |
+ * | | | +---------------------------+
+ * | | | |
+ * +--v---v---v----v--+
+ * \ pll_out_mux /
+ * \ /
+ * +------+-----+
+ * |
+ * +---------------+-----------------+
+ * | | |
+ * +------v-----+ +-------v-------+ +-------v-------+
+ * | bitclk_src | | post_vco_div1 | | post_vco_div4 |
+ * | DIV(1..15) | | DIV(1) | | DIV(4) |
+ * +------+-----+ +-------+-------+ +-------+-------+
+ * | | |
+ * Shadow | | +---------------------+
+ * Path | +-----------------------------+ |
+ * + | | |
+ * | +---------------------------------+ | |
+ * | | | | |
+ * | +------v-------+ +------v-------+ +-v---------v----+
+ * | | byteclk_src | | post_bit_div | \ post_vco_mux /
+ * | | DIV(8) | | DIV(1,2) | \ /
+ * | +------+-------+ +------+-------+ +---+------+
+ * | | | |
+ * | | | +----------+
+ * | | | |
+ * | | +----v-----v------+
+ * +-v--------v---------+ \ pclk_src_mux /
+ * \ byteclk_mux / \ /
+ * \ / +-----+-----+
+ * +------+-------+ | Shadow
+ * | | Path
+ * v +-----v------+ +
+ * dsi_byte_clk | pclk_src | |
+ * | DIV(1..15) | |
+ * +-----+------+ |
+ * | |
+ * +------+ |
+ * | |
+ * +---v----v----+
+ * \ pclk_mux /
+ * \ /
+ * +---+---+
+ * |
+ * |
+ * v
+ * dsi_pclk
*
*/
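In the new tree the VCO feeds four fixed dividers selected by pll_out_mux, and dsi_pll_calc_dec_frac() no longer multiplies the target rate by a computed output divider; the OUTDIV_1:0 register field simply stores log2 of the selected divider. A tiny sketch of that mapping:

#include <stdio.h>

/*
 * Illustrative only: mux select 0..3 corresponds to the fixed dividers
 * 1, 2, 4 and 8 implemented by the dsi*pll_pll_out_div1/2/4/8 clocks,
 * and the same value is written to PLL_PLL_OUTDIV_RATE.
 */
int main(void)
{
	for (int sel = 0; sel <= 3; sel++)
		printf("PLL_PLL_OUTDIV_RATE=%d -> divide VCO by %d\n",
		       sel, 1 << sel);
	return 0;
}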
@@ -1186,6 +1200,83 @@ static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
},
};
+static struct div_clk dsi0pll_pll_out_div1 = {
+ .data = {
+ .div = 1,
+ .min_div = 1,
+ .max_div = 1,
+ },
+ .c = {
+ .parent = &dsi0pll_vco_clk.c,
+ .dbg_name = "dsi0pll_pll_out_div1",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pll_out_div1.c),
+ }
+};
+
+static struct div_clk dsi0pll_pll_out_div2 = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi0pll_vco_clk.c,
+ .dbg_name = "dsi0pll_pll_out_div2",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pll_out_div2.c),
+ }
+};
+
+static struct div_clk dsi0pll_pll_out_div4 = {
+ .data = {
+ .div = 4,
+ .min_div = 4,
+ .max_div = 4,
+ },
+ .c = {
+ .parent = &dsi0pll_vco_clk.c,
+ .dbg_name = "dsi0pll_pll_out_div4",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pll_out_div4.c),
+ }
+};
+
+static struct div_clk dsi0pll_pll_out_div8 = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi0pll_vco_clk.c,
+ .dbg_name = "dsi0pll_pll_out_div8",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pll_out_div8.c),
+ }
+};
+
+static struct mux_clk dsi0pll_pll_out_mux = {
+ .num_parents = 4,
+ .parents = (struct clk_src[]) {
+ {&dsi0pll_pll_out_div1.c, 0},
+ {&dsi0pll_pll_out_div2.c, 1},
+ {&dsi0pll_pll_out_div4.c, 2},
+ {&dsi0pll_pll_out_div8.c, 3},
+ },
+ .ops = &mdss_pll_out_mux_ops,
+ .c = {
+ .parent = &dsi0pll_pll_out_div1.c,
+ .dbg_name = "dsi0pll_pll_out_mux",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pll_out_mux.c),
+ }
+};
static struct div_clk dsi0pll_bitclk_src = {
.data = {
.div = 1,
@@ -1194,7 +1285,7 @@ static struct div_clk dsi0pll_bitclk_src = {
},
.ops = &clk_bitclk_src_ops,
.c = {
- .parent = &dsi0pll_vco_clk.c,
+ .parent = &dsi0pll_pll_out_mux.c,
.dbg_name = "dsi0pll_bitclk_src",
.ops = &clk_ops_bitclk_src_c,
.flags = CLKFLAG_NO_RATE_CACHE,
@@ -1210,7 +1301,7 @@ static struct div_clk dsi0pll_post_vco_div1 = {
},
.ops = &clk_post_vco_div_ops,
.c = {
- .parent = &dsi0pll_vco_clk.c,
+ .parent = &dsi0pll_pll_out_mux.c,
.dbg_name = "dsi0pll_post_vco_div1",
.ops = &clk_ops_post_vco_div_c,
.flags = CLKFLAG_NO_RATE_CACHE,
@@ -1226,7 +1317,7 @@ static struct div_clk dsi0pll_post_vco_div4 = {
},
.ops = &clk_post_vco_div_ops,
.c = {
- .parent = &dsi0pll_vco_clk.c,
+ .parent = &dsi0pll_pll_out_mux.c,
.dbg_name = "dsi0pll_post_vco_div4",
.ops = &clk_ops_post_vco_div_c,
.flags = CLKFLAG_NO_RATE_CACHE,
@@ -1355,6 +1446,84 @@ static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
},
};
+static struct div_clk dsi1pll_pll_out_div1 = {
+ .data = {
+ .div = 1,
+ .min_div = 1,
+ .max_div = 1,
+ },
+ .c = {
+ .parent = &dsi1pll_vco_clk.c,
+ .dbg_name = "dsi1pll_pll_out_div1",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pll_out_div1.c),
+ }
+};
+
+static struct div_clk dsi1pll_pll_out_div2 = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi1pll_vco_clk.c,
+ .dbg_name = "dsi1pll_pll_out_div2",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pll_out_div2.c),
+ }
+};
+
+static struct div_clk dsi1pll_pll_out_div4 = {
+ .data = {
+ .div = 4,
+ .min_div = 4,
+ .max_div = 4,
+ },
+ .c = {
+ .parent = &dsi1pll_vco_clk.c,
+ .dbg_name = "dsi1pll_pll_out_div4",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pll_out_div4.c),
+ }
+};
+
+static struct div_clk dsi1pll_pll_out_div8 = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi1pll_vco_clk.c,
+ .dbg_name = "dsi1pll_pll_out_div8",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pll_out_div8.c),
+ }
+};
+
+static struct mux_clk dsi1pll_pll_out_mux = {
+ .num_parents = 4,
+ .parents = (struct clk_src[]) {
+ {&dsi1pll_pll_out_div1.c, 0},
+ {&dsi1pll_pll_out_div2.c, 1},
+ {&dsi1pll_pll_out_div4.c, 2},
+ {&dsi1pll_pll_out_div8.c, 3},
+ },
+ .ops = &mdss_pll_out_mux_ops,
+ .c = {
+ .parent = &dsi1pll_pll_out_div1.c,
+ .dbg_name = "dsi1pll_pll_out_mux",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pll_out_mux.c),
+ }
+};
+
static struct div_clk dsi1pll_bitclk_src = {
.data = {
.div = 1,
@@ -1363,7 +1532,7 @@ static struct div_clk dsi1pll_bitclk_src = {
},
.ops = &clk_bitclk_src_ops,
.c = {
- .parent = &dsi1pll_vco_clk.c,
+ .parent = &dsi1pll_pll_out_mux.c,
.dbg_name = "dsi1pll_bitclk_src",
.ops = &clk_ops_bitclk_src_c,
.flags = CLKFLAG_NO_RATE_CACHE,
@@ -1379,7 +1548,7 @@ static struct div_clk dsi1pll_post_vco_div1 = {
},
.ops = &clk_post_vco_div_ops,
.c = {
- .parent = &dsi1pll_vco_clk.c,
+ .parent = &dsi1pll_pll_out_mux.c,
.dbg_name = "dsi1pll_post_vco_div1",
.ops = &clk_ops_post_vco_div_c,
.flags = CLKFLAG_NO_RATE_CACHE,
@@ -1395,7 +1564,7 @@ static struct div_clk dsi1pll_post_vco_div4 = {
},
.ops = &clk_post_vco_div_ops,
.c = {
- .parent = &dsi1pll_vco_clk.c,
+ .parent = &dsi1pll_pll_out_mux.c,
.dbg_name = "dsi1pll_post_vco_div4",
.ops = &clk_ops_post_vco_div_c,
.flags = CLKFLAG_NO_RATE_CACHE,
@@ -1523,6 +1692,11 @@ static struct clk_lookup mdss_dsi_pll0cc_8998[] = {
CLK_LIST(dsi0pll_post_vco_div1),
CLK_LIST(dsi0pll_post_vco_div4),
CLK_LIST(dsi0pll_bitclk_src),
+ CLK_LIST(dsi0pll_pll_out_mux),
+ CLK_LIST(dsi0pll_pll_out_div8),
+ CLK_LIST(dsi0pll_pll_out_div4),
+ CLK_LIST(dsi0pll_pll_out_div2),
+ CLK_LIST(dsi0pll_pll_out_div1),
CLK_LIST(dsi0pll_vco_clk),
};
static struct clk_lookup mdss_dsi_pll1cc_8998[] = {
@@ -1536,6 +1710,11 @@ static struct clk_lookup mdss_dsi_pll1cc_8998[] = {
CLK_LIST(dsi1pll_post_vco_div1),
CLK_LIST(dsi1pll_post_vco_div4),
CLK_LIST(dsi1pll_bitclk_src),
+ CLK_LIST(dsi1pll_pll_out_mux),
+ CLK_LIST(dsi1pll_pll_out_div8),
+ CLK_LIST(dsi1pll_pll_out_div4),
+ CLK_LIST(dsi1pll_pll_out_div2),
+ CLK_LIST(dsi1pll_pll_out_div1),
CLK_LIST(dsi1pll_vco_clk),
};
@@ -1596,6 +1775,11 @@ int dsi_pll_clock_register_8998(struct platform_device *pdev,
dsi0pll_post_vco_div1.priv = pll_res;
dsi0pll_post_vco_div4.priv = pll_res;
dsi0pll_bitclk_src.priv = pll_res;
+ dsi0pll_pll_out_div1.priv = pll_res;
+ dsi0pll_pll_out_div2.priv = pll_res;
+ dsi0pll_pll_out_div4.priv = pll_res;
+ dsi0pll_pll_out_div8.priv = pll_res;
+ dsi0pll_pll_out_mux.priv = pll_res;
dsi0pll_vco_clk.priv = pll_res;
rc = of_msm_clock_register(pdev->dev.of_node,
@@ -1612,6 +1796,11 @@ int dsi_pll_clock_register_8998(struct platform_device *pdev,
dsi1pll_post_vco_div1.priv = pll_res;
dsi1pll_post_vco_div4.priv = pll_res;
dsi1pll_bitclk_src.priv = pll_res;
+ dsi1pll_pll_out_div1.priv = pll_res;
+ dsi1pll_pll_out_div2.priv = pll_res;
+ dsi1pll_pll_out_div4.priv = pll_res;
+ dsi1pll_pll_out_div8.priv = pll_res;
+ dsi1pll_pll_out_mux.priv = pll_res;
dsi1pll_vco_clk.priv = pll_res;
rc = of_msm_clock_register(pdev->dev.of_node,
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index d6d425773fa4..5b2db3c6568f 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
rate = clk_get_rate(s3c_freq->hclk);
if (rate < 133 * 1000 * 1000) {
pr_err("cpufreq: HCLK not at 133MHz\n");
- clk_put(s3c_freq->hclk);
ret = -EINVAL;
goto err_armclk;
}
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index f99a421c0ee4..49165daa807f 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -441,7 +441,7 @@ static int qcom_ice_enable(struct ice_device *ice_dev)
(ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
if ((reg & 0x80000000) != 0x0) {
- pr_err("%s: Bypass failed for ice = %p",
+ pr_err("%s: Bypass failed for ice = %pK",
__func__, (void *)ice_dev);
BUG();
}
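The logging changes in this and the following crypto files replace %p with %pK, the printk specifier that honours kptr_restrict so raw kernel addresses are not exposed to unprivileged readers of the log. A minimal module sketch of the difference; the module name is hypothetical:

#include <linux/module.h>

static int some_object;

static int __init pk_demo_init(void)
{
	/* %pK is masked according to kptr_restrict; on this kernel
	 * generation plain %p still prints the raw address (only newer
	 * kernels hash %p as well). */
	pr_info("with %%p:  %p\n", &some_object);
	pr_info("with %%pK: %pK\n", &some_object);
	return 0;
}

static void __exit pk_demo_exit(void)
{
}

module_init(pk_demo_init);
module_exit(pk_demo_exit);
MODULE_LICENSE("GPL");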
@@ -467,7 +467,7 @@ static int qcom_ice_verify_ice(struct ice_device *ice_dev)
}
ice_dev->ice_hw_version = rev;
- dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n",
+ dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%pK\n",
maj_rev, min_rev, step_rev,
ice_dev->mmio);
@@ -1256,7 +1256,7 @@ static void qcom_ice_debug(struct platform_device *pdev)
goto out;
}
- pr_err("%s: =========== REGISTER DUMP (%p)===========\n",
+ pr_err("%s: =========== REGISTER DUMP (%pK)===========\n",
ice_dev->ice_instance_type, ice_dev);
pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
@@ -1432,7 +1432,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
int ret = 0;
bool is_pfe = false;
- if (!pdev || !req || !setting) {
+ if (!pdev || !req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
@@ -1570,7 +1570,7 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
struct ice_device *ice_dev = NULL;
if (!node) {
- pr_err("%s: invalid node %p", __func__, node);
+ pr_err("%s: invalid node %pK", __func__, node);
goto out;
}
@@ -1587,13 +1587,14 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
list_for_each_entry(ice_dev, &ice_devices, list) {
if (ice_dev->pdev->of_node == node) {
- pr_info("%s: found ice device %p\n", __func__, ice_dev);
+ pr_info("%s: found ice device %pK\n", __func__,
+ ice_dev);
break;
}
}
ice_pdev = to_platform_device(ice_dev->pdev);
- pr_info("%s: matching platform device %p\n", __func__, ice_pdev);
+ pr_info("%s: matching platform device %pK\n", __func__, ice_pdev);
out:
return ice_pdev;
}
@@ -1632,7 +1633,7 @@ static int enable_ice_setup(struct ice_device *ice_dev)
}
ret = regulator_enable(ice_dev->reg);
if (ret) {
- pr_err("%s:%p: Could not enable regulator\n",
+ pr_err("%s:%pK: Could not enable regulator\n",
__func__, ice_dev);
goto out;
}
@@ -1640,7 +1641,7 @@ static int enable_ice_setup(struct ice_device *ice_dev)
/* Setup Clocks */
if (qcom_ice_enable_clocks(ice_dev, true)) {
- pr_err("%s:%p:%s Could not enable clocks\n", __func__,
+ pr_err("%s:%pK:%s Could not enable clocks\n", __func__,
ice_dev, ice_dev->ice_instance_type);
goto out_reg;
}
@@ -1652,7 +1653,7 @@ static int enable_ice_setup(struct ice_device *ice_dev)
ret = qcom_ice_set_bus_vote(ice_dev, vote);
if (ret) {
- pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+ pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
goto out_clocks;
}
@@ -1684,19 +1685,19 @@ static int disable_ice_setup(struct ice_device *ice_dev)
/* Setup Bus Vote */
vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
if (vote < 0) {
- pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev);
+ pr_err("%s:%pK: Unable to get bus vote\n", __func__, ice_dev);
goto out_disable_clocks;
}
ret = qcom_ice_set_bus_vote(ice_dev, vote);
if (ret)
- pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+ pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
out_disable_clocks:
/* Setup Clocks */
if (qcom_ice_enable_clocks(ice_dev, false))
- pr_err("%s:%p:%s Could not disable clocks\n", __func__,
+ pr_err("%s:%pK:%s Could not disable clocks\n", __func__,
ice_dev, ice_dev->ice_instance_type);
/* Setup Regulator */
@@ -1707,7 +1708,7 @@ out_disable_clocks:
}
ret = regulator_disable(ice_dev->reg);
if (ret) {
- pr_err("%s:%p: Could not disable regulator\n",
+ pr_err("%s:%pK: Could not disable regulator\n",
__func__, ice_dev);
goto out;
}
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
index a568bf46f09f..96297fe7eaad 100644
--- a/drivers/crypto/msm/ota_crypto.c
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -172,7 +172,7 @@ static int qcota_release(struct inode *inode, struct file *file)
podev = file->private_data;
if (podev != NULL && podev->magic != OTA_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
}
@@ -444,7 +444,7 @@ static long qcota_ioctl(struct file *file,
podev = file->private_data;
if (podev == NULL || podev->magic != OTA_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
return -ENOENT;
}
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index ee7e735761e2..4ab8ca143f6c 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -1,6 +1,6 @@
/* Qualcomm Crypto Engine driver.
*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -294,11 +294,11 @@ static int _probe_ce_engine(struct qce_device *pce_dev)
pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
dev_info(pce_dev->pdev,
- "CE device = 0x%x\n, "
- "IO base, CE = 0x%p\n, "
+ "CE device = 0x%x\n"
+ "IO base, CE = 0x%pK\n"
"Consumer (IN) PIPE %d, "
"Producer (OUT) PIPE %d\n"
- "IO base BAM = 0x%p\n"
+ "IO base BAM = 0x%pK\n"
"BAM IRQ %d\n"
"Engines Availability = 0x%x\n",
pce_dev->ce_bam_info.ce_device,
@@ -1160,7 +1160,7 @@ static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
#define QCE_WRITE_REG(val, addr) \
{ \
- pr_info(" [0x%p] 0x%x\n", addr, (uint32_t)val); \
+ pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \
writel_relaxed(val, addr); \
}
@@ -2730,7 +2730,7 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
sps_event->callback = NULL;
}
- pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%p\n",
+ pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
goto out;
@@ -2895,7 +2895,7 @@ static int qce_sps_get_bam(struct qce_device *pce_dev)
bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
bam.options |= SPS_BAM_CACHED_WP;
pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
- pr_debug("bam virtual base=0x%p\n", bam.virt_addr);
+ pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
/* Register CE Peripheral BAM device to SPS driver */
rc = sps_register_bam_device(&bam, &pbam->handle);
@@ -2998,7 +2998,7 @@ static void print_notify_debug(struct sps_event_notify *notify)
phys_addr_t addr =
DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
notify->data.transfer.iovec.addr);
- pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%p\n",
+ pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
notify->event_id, &addr,
notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags,
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index beeb99e479c7..20bf034bb193 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -201,7 +201,7 @@ static int qcedev_release(struct inode *inode, struct file *file)
handle = file->private_data;
podev = handle->cntl;
if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
}
kzfree(handle);
@@ -1607,7 +1607,7 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
podev = handle->cntl;
qcedev_areq.handle = handle;
if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
return -ENOENT;
}
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index b5c5dc035c66..5b364f053b1b 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -262,7 +262,7 @@ static void qcrypto_free_req_control(struct crypto_engine *pce,
preq->arsp = NULL;
/* free req */
if (xchg(&preq->in_use, false) == false) {
- pr_warn("request info %p free already\n", preq);
+ pr_warn("request info %pK free already\n", preq);
} else {
atomic_dec(&pce->req_count);
}
@@ -1720,7 +1720,7 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest,
}
#ifdef QCRYPTO_DEBUG
- dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+ dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %pK ret %d\n",
areq, ret);
#endif
if (digest) {
@@ -1779,7 +1779,7 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
}
#ifdef QCRYPTO_DEBUG
- dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+ dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %pK ret %d\n",
areq, ret);
#endif
if (iv)
@@ -2479,7 +2479,7 @@ static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2509,7 +2509,7 @@ static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2539,7 +2539,7 @@ static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2727,7 +2727,7 @@ static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2757,7 +2757,7 @@ static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2787,7 +2787,7 @@ static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -3353,7 +3353,7 @@ static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
#ifdef QCRYPTO_DEBUG
dev_info(&ctx->pengine->pdev->dev,
- "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+ "_qcrypto_aead_encrypt_aes_cbc: %pK\n", req);
#endif
rctx = aead_request_ctx(req);
@@ -3384,7 +3384,7 @@ static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
#ifdef QCRYPTO_DEBUG
dev_info(&ctx->pengine->pdev->dev,
- "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+ "_qcrypto_aead_decrypt_aes_cbc: %pK\n", req);
#endif
rctx = aead_request_ctx(req);
rctx->aead = 1;
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 26f69fa61ba1..6eab3ea187c6 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -88,12 +88,10 @@ static void mdm_enable_irqs(struct mdm_ctrl *mdm)
return;
if (mdm->irq_mask & IRQ_ERRFATAL) {
enable_irq(mdm->errfatal_irq);
- irq_set_irq_wake(mdm->errfatal_irq, 1);
mdm->irq_mask &= ~IRQ_ERRFATAL;
}
if (mdm->irq_mask & IRQ_STATUS) {
enable_irq(mdm->status_irq);
- irq_set_irq_wake(mdm->status_irq, 1);
mdm->irq_mask &= ~IRQ_STATUS;
}
if (mdm->irq_mask & IRQ_PBLRDY) {
@@ -107,12 +105,10 @@ static void mdm_disable_irqs(struct mdm_ctrl *mdm)
if (!mdm)
return;
if (!(mdm->irq_mask & IRQ_ERRFATAL)) {
- irq_set_irq_wake(mdm->errfatal_irq, 0);
disable_irq_nosync(mdm->errfatal_irq);
mdm->irq_mask |= IRQ_ERRFATAL;
}
if (!(mdm->irq_mask & IRQ_STATUS)) {
- irq_set_irq_wake(mdm->status_irq, 0);
disable_irq_nosync(mdm->status_irq);
mdm->irq_mask |= IRQ_STATUS;
}
@@ -701,6 +697,7 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
goto errfatal_err;
}
mdm->errfatal_irq = irq;
+ irq_set_irq_wake(mdm->errfatal_irq, 1);
errfatal_err:
/* status irq */
@@ -719,6 +716,7 @@ errfatal_err:
goto status_err;
}
mdm->status_irq = irq;
+ irq_set_irq_wake(mdm->status_irq, 1);
status_err:
if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
irq = platform_get_irq_byname(pdev, "plbrdy_irq");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 51a9942cdb40..f4cae5357e40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -681,6 +681,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
+ } else if (adev->clock.default_dispclk <= 60000) {
+ DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
+ adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 25a3e2485cc2..2bc17a907ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -124,6 +124,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}
+
+ if (!(*out_ring && (*out_ring)->adev)) {
+ DRM_ERROR("Ring %d is not initialized on IP %d\n",
+ ring, ip_type);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49aa35016653..247b088990dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -164,7 +164,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
@@ -177,7 +177,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index b92139e9b9d8..b5c64edeb668 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,7 +113,11 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
- bool DisableP2A;
+ enum {
+ ast_use_p2a,
+ ast_use_dt,
+ ast_use_defaults
+ } config_mode;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 6c021165ca67..498a94069e6b 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
return ret;
}
+static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+{
+ struct device_node *np = dev->pdev->dev.of_node;
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, jregd0, jregd1;
+
+ /* Defaults */
+ ast->config_mode = ast_use_defaults;
+ *scu_rev = 0xffffffff;
+
+ /* Check if we have device-tree properties */
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
+ scu_rev)) {
+ /* We do, disable P2A access */
+ ast->config_mode = ast_use_dt;
+ DRM_INFO("Using device-tree for configuration\n");
+ return;
+ }
+
+ /* Not all families have a P2A bridge */
+ if (dev->pdev->device != PCI_CHIP_AST2000)
+ return;
+
+ /*
+	 * The BMC will set SCU 0x40 D[12] to 1 if the P2A bridge
+	 * is disabled. We force using P2A if the VGA-only mode bit
+	 * D[7] is set.
+ */
+ jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+ /* Double check it's actually working */
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF) {
+ /* P2A works, grab silicon revision */
+ ast->config_mode = ast_use_p2a;
+
+ DRM_INFO("Using P2A bridge for configuration\n");
+
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ *scu_rev = ast_read32(ast, 0x1207c);
+ return;
+ }
+ }
+
+ /* We have a P2A bridge but it's disabled */
+ DRM_INFO("P2A bridge disabled, using default configuration\n");
+}
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = dev->dev_private;
- uint32_t data, jreg;
+ uint32_t jreg, scu_rev;
+
+ /*
+ * If VGA isn't enabled, we need to enable now or subsequent
+ * access to the scratch registers will fail. We also inform
+ * our caller that it needs to POST the chip
+ * (Assumption: VGA not enabled -> need to POST)
+ */
+ if (!ast_is_vga_enabled(dev)) {
+ ast_enable_vga(dev);
+ DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
+ *need_post = true;
+ } else
+ *need_post = false;
+
+
+ /* Enable extended register access */
+ ast_enable_mmio(dev);
ast_open_key(ast);
+ /* Find out whether P2A works or whether to use device-tree */
+ ast_detect_config_mode(dev, &scu_rev);
+
+ /* Identify chipset */
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
DRM_INFO("AST 1180 detected\n");
@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->chip = AST2300;
DRM_INFO("AST 2300 detected\n");
} else if (dev->pdev->revision >= 0x10) {
- uint32_t data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
- data = ast_read32(ast, 0x1207c);
- switch (data & 0x0300) {
+ switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
DRM_INFO("AST 1100 detected\n");
@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
}
}
- /*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail. We also inform
- * our caller that it needs to POST the chip
- * (Assumption: VGA not enabled -> need to POST)
- */
- if (!ast_is_vga_enabled(dev)) {
- ast_enable_vga(dev);
- ast_enable_mmio(dev);
- DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
- *need_post = true;
- } else
- *need_post = false;
-
- /* Check P2A Access */
- ast->DisableP2A = true;
- data = ast_read32(ast, 0xf004);
- if (data != 0xFFFFFFFF)
- ast->DisableP2A = false;
-
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- if (ast->DisableP2A == false) {
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
- ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
- ast->support_wide_screen = true;
- }
+ if (ast->chip == AST2300 &&
+ (scu_rev & 0x300) == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 &&
+ (scu_rev & 0x300) == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
}
break;
}
@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
+ struct device_node *np = dev->pdev->dev.of_node;
struct ast_private *ast = dev->dev_private;
- uint32_t data, data2;
- uint32_t denum, num, div, ref_pll;
+ uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
+ uint32_t denum, num, div, ref_pll, dsel;
- if (ast->DisableP2A)
- {
+ switch (ast->config_mode) {
+ case ast_use_dt:
+ /*
+ * If some properties are missing, use reasonable
+ * defaults for AST2400
+ */
+ if (of_property_read_u32(np, "aspeed,mcr-configuration",
+ &mcr_cfg))
+ mcr_cfg = 0x00000577;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
+ &mcr_scu_mpll))
+ mcr_scu_mpll = 0x000050C0;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
+ &mcr_scu_strap))
+ mcr_scu_strap = 0;
+ break;
+ case ast_use_p2a:
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+ mcr_scu_mpll = ast_read32(ast, 0x10120);
+ mcr_scu_strap = ast_read32(ast, 0x10170);
+ break;
+ case ast_use_defaults:
+ default:
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
ast->mclk = 396;
+ return 0;
}
- else
- {
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
- ast->dram_bus_width = 16;
- else
- ast->dram_bus_width = 32;
- if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (data & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- case 8:
- if (data & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
- break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
- break;
- }
- }
+ if (mcr_cfg & 0x40)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
- case 3:
- div = 0x4;
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ switch (mcr_cfg & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
break;
- case 2:
+ default:
case 1:
- div = 0x2;
+ ast->dram_type = AST_DRAM_1Gx16;
break;
- default:
- div = 0x1;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (mcr_cfg & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (mcr_cfg & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
break;
}
- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
+
+ if (mcr_scu_strap & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = mcr_scu_mpll & 0x1f;
+ num = (mcr_scu_mpll & 0x3fe0) >> 5;
+ dsel = (mcr_scu_mpll & 0xc000) >> 14;
+ switch (dsel) {
+ case 3:
+ div = 0x4;
+ break;
+ case 2:
+ case 1:
+ div = 0x2;
+ break;
+ default:
+ div = 0x1;
+ break;
+ }
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}
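
As a sanity check on the device-tree fallback values above, here is a small standalone sketch (not part of the patch) that decodes the AST2400 default mcr_scu_mpll value with the same masks ast_get_dram_info() uses; the constant is taken directly from the hunk above and the program is illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned mcr_scu_mpll = 0x000050C0;		/* AST2400 DT fallback from the hunk above */
	unsigned denum = mcr_scu_mpll & 0x1f;		/* -> 0   */
	unsigned num = (mcr_scu_mpll & 0x3fe0) >> 5;	/* -> 134 */
	unsigned dsel = (mcr_scu_mpll & 0xc000) >> 14;	/* -> 1, so div = 2 */
	unsigned div = (dsel == 3) ? 4 : (dsel >= 1) ? 2 : 1;

	/* with mcr_scu_strap = 0 the reference PLL defaults to 12000 */
	printf("denum=%u num=%u div=%u\n", denum, num, div);
	return 0;
}
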
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 270e8fb2803f..c7c58becb25d 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev)
ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
- if (ast->DisableP2A == false)
- {
+ if (ast->config_mode == ast_use_p2a) {
if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);
ast_init_3rdtx(dev);
- }
- else
- {
+ } else {
if (ast->tx_chip_type != AST_TX_NONE)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 84125b3d1f95..4c082fff2fc5 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -48,6 +48,7 @@ msm_drm-y := \
sde/sde_backlight.o \
sde/sde_color_processing.o \
sde/sde_vbif.o \
+ sde/sde_splash.o \
sde_dbg_evtlog.o \
sde_io_util.o \
dba_bridge.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index e24827590b7c..c085e173232b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -409,8 +409,8 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
+ gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b612c9a18faf..624c2a87d593 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -447,9 +447,9 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
+ gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 37323e962c2c..45a38b247727 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -375,6 +375,7 @@ static const struct {
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
@@ -391,6 +392,11 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+
+ if (state)
+ set_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+ else
+ clear_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
}
static int a5xx_me_init(struct msm_gpu *gpu)
@@ -1168,6 +1174,10 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ /* If we are already up, don't mess with what works */
+ if (gpu->active_cnt > 1)
+ return 0;
+
/* Turn on the RBCCU domain first to limit the chances of voltage droop */
gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
@@ -1198,22 +1208,27 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- /* Clear the VBIF pipe before shutting down */
+ /* Only do this next bit if we are about to go down */
+ if (gpu->active_cnt == 1) {
+ /* Clear the VBIF pipe before shutting down */
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
- spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF)
+ == 0xF);
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
- /*
- * Reset the VBIF before power collapse to avoid issue with FIFO
- * entries
- */
-
- if (adreno_is_a530(adreno_gpu)) {
- /* These only need to be done for A530 */
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ /*
+ * Reset the VBIF before power collapse to avoid issues with FIFO
+ * entries
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* These only need to be done for A530 */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+ 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+ 0x00000000);
+ }
}
return msm_gpu_pm_suspend(gpu);
@@ -1233,13 +1248,29 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ bool enabled = test_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
+
+ /*
+ * Temporarily disable hardware clock gating before going into
+ * adreno_show to avoid issues while reading the registers
+ */
+
+ if (enabled)
+ a5xx_set_hwcg(gpu, false);
adreno_show(gpu, m);
+
+ if (enabled)
+ a5xx_set_hwcg(gpu, true);
+
+ gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index f8b00982fe86..e637237fa811 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -23,6 +23,7 @@
enum {
A5XX_ZAP_SHADER_LOADED = 1,
+ A5XX_HWCG_ENABLED = 2,
};
struct a5xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2273b06b59a6..04e0056f2a49 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -297,8 +297,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
}
- gpu->funcs->pm_resume(gpu);
-
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -311,8 +309,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
}
}
-
- gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index 2ee75a2c1039..f1c44b30575f 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -556,13 +556,13 @@ static const struct file_operations sde_hdmi_hdcp_state_fops = {
.read = _sde_hdmi_hdcp_state_read,
};
-static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in)
+static u64 _sde_hdmi_clip_valid_pclk(struct hdmi *hdmi, u64 pclk_in)
{
u32 pclk_delta, pclk;
u64 pclk_clip = pclk_in;
/* as per standard, 0.5% of deviation is allowed */
- pclk = mode->clock * HDMI_KHZ_TO_HZ;
+ pclk = hdmi->pixclock;
pclk_delta = pclk * 5 / 1000;
if (pclk_in < (pclk - pclk_delta))
@@ -700,7 +700,6 @@ static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
{
struct hdmi *hdmi = display->ctrl.ctrl;
- struct drm_display_mode *current_mode = &display->mode;
u64 cur_pclk, dst_pclk;
u64 clip_pclk;
int rc = 0;
@@ -725,7 +724,7 @@ static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
dst_pclk = cur_pclk * (1000000000 + ppm);
do_div(dst_pclk, 1000000000);
- clip_pclk = _sde_hdmi_clip_valid_pclk(current_mode, dst_pclk);
+ clip_pclk = _sde_hdmi_clip_valid_pclk(hdmi, dst_pclk);
/* update pclk */
if (clip_pclk != cur_pclk) {
@@ -1254,6 +1253,13 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
uint32_t hpd_ctrl;
int i, ret;
unsigned long flags;
+ struct drm_connector *connector;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ connector = hdmi->connector;
+ priv = connector->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -1293,9 +1299,11 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
}
}
- sde_hdmi_set_mode(hdmi, false);
- _sde_hdmi_phy_reset(hdmi);
- sde_hdmi_set_mode(hdmi, true);
+ if (!sde_kms->splash_info.handoff) {
+ sde_hdmi_set_mode(hdmi, false);
+ _sde_hdmi_phy_reset(hdmi);
+ sde_hdmi_set_mode(hdmi, true);
+ }
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -1893,11 +1901,6 @@ struct drm_msm_ext_panel_hdr_metadata *hdr_meta)
return;
}
- if (!connector->hdr_supported) {
- SDE_ERROR("Sink does not support HDR\n");
- return;
- }
-
/* Setup Packet header and payload */
packet_header = type_code | (version << 8) | (length << 16);
hdmi_write(hdmi, HDMI_GENERIC0_HDR, packet_header);
@@ -1967,6 +1970,30 @@ enable_packet_control:
hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
}
+static void sde_hdmi_clear_hdr_infoframe(struct sde_hdmi *display)
+{
+ struct hdmi *hdmi;
+ struct drm_connector *connector;
+ u32 packet_control = 0;
+
+ if (!display) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ hdmi = display->ctrl.ctrl;
+ connector = display->ctrl.ctrl->connector;
+
+ if (!hdmi || !connector) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+ packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK;
+ hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
int sde_hdmi_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
int property_index,
@@ -2131,9 +2158,13 @@ static int sde_hdmi_tx_check_capability(struct sde_hdmi *sde_hdmi)
}
}
- SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s>\n", __func__,
+ if (sde_hdmi->hdmi_tx_major_version >= HDMI_TX_VERSION_4)
+ sde_hdmi->dc_feature_supported = true;
+
+ SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s, Deep Color:%s>\n", __func__,
hdmi_disabled ? "OFF" : "ON",
- hdcp_disabled ? "OFF" : "ON");
+ hdcp_disabled ? "OFF" : "ON",
+ sde_hdmi->dc_feature_supported ? "ON" : "OFF");
if (hdmi_disabled) {
DEV_ERR("%s: HDMI disabled\n", __func__);
@@ -2307,9 +2338,28 @@ int sde_hdmi_pre_kickoff(struct drm_connector *connector,
void *display,
struct msm_display_kickoff_params *params)
{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+ u8 hdr_op;
- sde_hdmi_panel_set_hdr_infoframe(display,
- params->hdr_metadata);
+ if (!connector || !display || !params) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ hdr_ctrl = params->hdr_ctrl;
+
+ hdr_op = sde_hdmi_hdr_get_ops(hdmi_display->curr_hdr_state,
+ hdr_ctrl->hdr_state);
+
+ if (hdr_op == HDR_SEND_INFO) {
+ if (connector->hdr_supported)
+ sde_hdmi_panel_set_hdr_infoframe(display,
+ &hdr_ctrl->hdr_meta);
+ } else if (hdr_op == HDR_CLEAR_INFO)
+ sde_hdmi_clear_hdr_infoframe(display);
+
+ hdmi_display->curr_hdr_state = hdr_ctrl->hdr_state;
return 0;
}
@@ -2822,6 +2872,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
struct msm_drm_private *priv = NULL;
struct hdmi *hdmi;
struct platform_device *pdev;
+ struct sde_kms *sde_kms;
DBG("");
if (!display || !display->drm_dev || !enc) {
@@ -2880,6 +2931,19 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
enc->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
+ /*
+ * After initialising the HDMI bridge, check whether early display
+ * (splash handoff) is enabled for HDMI. If so, take a reference on
+ * the HDMI power clocks so that clock_late_init does not disable
+ * them when it finds clk.count == 1.
+ */
+ sde_kms = to_sde_kms(priv->kms);
+ if (sde_kms->splash_info.handoff) {
+ sde_hdmi_bridge_power_on(hdmi->bridge);
+ hdmi->power_on = true;
+ }
+
mutex_unlock(&display->display_lock);
return 0;
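
The pre_kickoff path above dispatches on sde_hdmi_hdr_get_ops(); the compact userspace sketch below (not part of the patch) models that mapping. The enum values mirror the ones added to sde_hdmi_util.h, and the condensed helper is for illustration only.

#include <stdio.h>

enum hdr_state { HDR_DISABLE, HDR_ENABLE };
enum hdr_op { HDR_UNSUPPORTED_OP, HDR_SEND_INFO, HDR_CLEAR_INFO };

/* condensed version of sde_hdmi_hdr_get_ops() for illustration */
static enum hdr_op hdr_get_ops(enum hdr_state cur, enum hdr_state next)
{
	if (next == HDR_ENABLE)
		return HDR_SEND_INFO;	/* DISABLE->ENABLE or ENABLE->ENABLE */
	if (cur == HDR_ENABLE)
		return HDR_CLEAR_INFO;	/* ENABLE->DISABLE: clear PKT_CTRL bits */
	return HDR_UNSUPPORTED_OP;	/* DISABLE->DISABLE: nothing to do */
}

int main(void)
{
	printf("%d %d %d\n",
	       hdr_get_ops(HDR_DISABLE, HDR_ENABLE),	/* 1: send infoframe */
	       hdr_get_ops(HDR_ENABLE, HDR_ENABLE),	/* 1: keep sending   */
	       hdr_get_ops(HDR_ENABLE, HDR_DISABLE));	/* 2: clear it       */
	return 0;
}
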
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 6b9bcfec031b..f2dd5351913b 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -33,6 +33,9 @@
#include "sde_hdmi_util.h"
#include "sde_hdcp.h"
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
#ifdef HDMI_DEBUG_ENABLE
#define SDE_HDMI_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
#else
@@ -110,6 +113,8 @@ enum hdmi_tx_feature_type {
* @client_notify_pending: If there is client notification pending.
* @irq_domain: IRQ domain structure.
* @pll_update_enable: if it's allowed to update HDMI PLL ppm.
+ * @dc_enable: Whether deep color is currently enabled (30-bit DC only so far).
+ * @dc_feature_supported: Whether the deep color feature is supported.
* @notifier: CEC notifier to convey physical address information.
* @root: Debug fs root entry.
*/
@@ -142,6 +147,7 @@ struct sde_hdmi {
u32 hdcp22_present;
u8 hdcp_status;
u32 enc_lvl;
+ u8 curr_hdr_state;
bool auth_state;
bool sink_hdcp22_support;
bool src_hdcp22_support;
@@ -161,6 +167,8 @@ struct sde_hdmi {
struct irq_domain *irq_domain;
struct cec_notifier *notifier;
bool pll_update_enable;
+ bool dc_enable;
+ bool dc_feature_supported;
struct delayed_work hdcp_cb_work;
struct dss_io_data io[HDMI_TX_MAX_IO];
@@ -188,6 +196,10 @@ enum hdmi_tx_scdc_access_type {
#define HDMI_KHZ_TO_HZ 1000
#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2
+#define HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
+
+#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x7
/* Maximum pixel clock rates for hdmi tx */
#define HDMI_DEFAULT_MAX_PCLK_RATE 148500
@@ -345,6 +357,13 @@ int sde_hdmi_set_property(struct drm_connector *connector,
int property_index,
uint64_t value,
void *display);
+/**
+ * sde_hdmi_bridge_power_on -- A wrapper of _sde_hdmi_bridge_power_on.
+ * @bridge: Handle to the drm bridge.
+ *
+ * Return: void.
+ */
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge);
/**
* sde_hdmi_get_property() - get the connector properties
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 24d2320683e4..e4eb531c12aa 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -345,7 +345,9 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
return 0;
}
- if (mode->clock > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) {
+ /* use actual clock instead of mode clock */
+ if (hdmi->pixclock >
+ HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ * HDMI_KHZ_TO_HZ) {
scrambler_on = true;
tmds_clock_ratio = 1;
} else {
@@ -402,6 +404,38 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
return rc;
}
+static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi)
+{
+ struct drm_connector *connector = hdmi->connector;
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ u32 hdmi_ctrl_reg, vbi_pkt_reg;
+
+ SDE_DEBUG("Deep Color: %s\n", display->dc_enable ? "On" : "Off");
+
+ if (display->dc_enable) {
+ hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+ /* GC CD override */
+ hdmi_ctrl_reg |= BIT(27);
+
+ /* enable deep color for RGB888/YUV444/YUV420 30 bits */
+ hdmi_ctrl_reg |= BIT(24);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+ /* Enable GC_CONT and GC_SEND in General Control Packet
+ * (GCP) register so that deep color data is
+ * transmitted to the sink on every frame, allowing
+ * the sink to decode the data correctly.
+ *
+ * GC_CONT: 0x1 - Send GCP on every frame
+ * GC_SEND: 0x1 - Enable GCP Transmission
+ */
+ vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ vbi_pkt_reg |= BIT(5) | BIT(4);
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+ }
+}
+
static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
@@ -484,12 +518,17 @@ static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
/* need to update hdcp info here to ensure right HDCP support*/
sde_hdmi_update_hdcp_info(hdmi->connector);
/* start HDCP authentication */
sde_hdmi_start_hdcp(hdmi->connector);
+
+ /* reset HDR state */
+ display->curr_hdr_state = HDR_DISABLE;
}
static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
@@ -543,6 +582,10 @@ static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
struct hdmi_avi_infoframe info;
drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+
+ if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ info.colorspace = HDMI_COLORSPACE_YUV420;
+
hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
@@ -660,31 +703,77 @@ static inline void _sde_hdmi_save_mode(struct hdmi *hdmi,
drm_mode_copy(&display->mode, mode);
}
+static u32 _sde_hdmi_choose_best_format(struct hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ /*
+ * choose priority:
+ * 1. DC + RGB
+ * 2. DC + YUV
+ * 3. RGB
+ * 4. YUV
+ */
+ int dc_format;
+ struct drm_connector *connector = hdmi->connector;
+
+ dc_format = sde_hdmi_sink_dc_support(connector, mode);
+ if (dc_format & MSM_MODE_FLAG_RGB444_DC_ENABLE)
+ return (MSM_MODE_FLAG_COLOR_FORMAT_RGB444
+ | MSM_MODE_FLAG_RGB444_DC_ENABLE);
+ else if (dc_format & MSM_MODE_FLAG_YUV420_DC_ENABLE)
+ return (MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420
+ | MSM_MODE_FLAG_YUV420_DC_ENABLE);
+ else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB)
+ return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+ else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV)
+ return MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420;
+
+ SDE_ERROR("Can't get available best display format\n");
+
+ return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+}
+
static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct drm_connector *connector = hdmi->connector;
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
+ u32 div = 0;
mode = adjusted_mode;
- hdmi->pixclock = mode->clock * 1000;
+ display->dc_enable = mode->private_flags &
+ (MSM_MODE_FLAG_RGB444_DC_ENABLE |
+ MSM_MODE_FLAG_YUV420_DC_ENABLE);
+ /* compute pixclock as per color format and bit depth */
+ hdmi->pixclock = sde_hdmi_calc_pixclk(
+ mode->clock * HDMI_KHZ_TO_HZ,
+ mode->private_flags,
+ display->dc_enable);
+ SDE_DEBUG("Actual PCLK: %lu, Mode PCLK: %d\n",
+ hdmi->pixclock, mode->clock);
- hstart = mode->htotal - mode->hsync_start;
- hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+ if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ div = 1;
+
+ hstart = (mode->htotal - mode->hsync_start) >> div;
+ hend = (mode->htotal - mode->hsync_start + mode->hdisplay) >> div;
vstart = mode->vtotal - mode->vsync_start - 1;
vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
- DRM_DEBUG(
+ SDE_DEBUG(
"htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
hdmi_write(hdmi, REG_HDMI_TOTAL,
- SDE_HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ SDE_HDMI_TOTAL_H_TOTAL((mode->htotal >> div) - 1) |
SDE_HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
@@ -734,6 +823,27 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
_sde_hdmi_save_mode(hdmi, mode);
_sde_hdmi_bridge_setup_scrambler(hdmi, mode);
+ _sde_hdmi_bridge_setup_deep_color(hdmi);
+}
+
+static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+
+ adjusted_mode->private_flags |=
+ _sde_hdmi_choose_best_format(hdmi, adjusted_mode);
+ SDE_DEBUG("Adjusted mode private flags: 0x%x\n",
+ adjusted_mode->private_flags);
+
+ return true;
+}
+
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+ _sde_hdmi_bridge_power_on(bridge);
}
static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
@@ -742,6 +852,7 @@ static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
.disable = _sde_hdmi_bridge_disable,
.post_disable = _sde_hdmi_bridge_post_disable,
.mode_set = _sde_hdmi_bridge_mode_set,
+ .mode_fixup = _sde_hdmi_bridge_mode_fixup,
};
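
To make the ">> div" halving in _sde_hdmi_bridge_mode_set() above concrete, here is a minimal worked example, not part of the patch, assuming standard CEA 4K60 timings (hdisplay 3840, hsync_start 4016, htotal 4400) with YUV420 selected so div = 1.

#include <stdio.h>

int main(void)
{
	int hdisplay = 3840, hsync_start = 4016, htotal = 4400;
	int div = 1;	/* MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 is set */

	int hstart = (htotal - hsync_start) >> div;		/* 192  */
	int hend = (htotal - hsync_start + hdisplay) >> div;	/* 2112 */
	int h_total_field = (htotal >> div) - 1;		/* 2199 */

	printf("hstart=%d hend=%d H_TOTAL=%d\n", hstart, hend, h_total_field);
	return 0;
}
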
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
index a7887d2c84b0..a291a1112aeb 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
@@ -68,6 +68,15 @@ static void sde_hdmi_hdcp2p2_ddc_clear_status(struct sde_hdmi *display)
hdmi_write(hdmi, HDMI_HDCP2P2_DDC_STATUS, reg_val);
}
+static const char *sde_hdmi_hdr_sname(enum sde_hdmi_hdr_state hdr_state)
+{
+ switch (hdr_state) {
+ case HDR_DISABLE: return "HDR_DISABLE";
+ case HDR_ENABLE: return "HDR_ENABLE";
+ default: return "HDR_INVALID_STATE";
+ }
+}
+
/**
* sde_hdmi_dump_regs - utility to dump HDMI regs
* @hdmi_display: Pointer to private display handle
@@ -825,3 +834,123 @@ int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display)
}
return rc;
}
+
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+ u32 out_format, bool dc_enable)
+{
+ u32 rate_ratio = HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+ if (out_format & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ rate_ratio = HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+ pixel_freq /= rate_ratio;
+
+ if (dc_enable)
+ pixel_freq += pixel_freq >> 2;
+
+ return pixel_freq;
+}
+
+bool sde_hdmi_validate_pixclk(struct drm_connector *connector,
+ unsigned long pclk)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ unsigned long max_pclk = display->max_pclk_khz * HDMI_KHZ_TO_HZ;
+
+ if (connector->max_tmds_char)
+ max_pclk = MIN(max_pclk,
+ connector->max_tmds_char * HDMI_MHZ_TO_HZ);
+ else if (connector->max_tmds_clock)
+ max_pclk = MIN(max_pclk,
+ connector->max_tmds_clock * HDMI_MHZ_TO_HZ);
+
+ SDE_DEBUG("MAX PCLK = %ld, PCLK = %ld\n", max_pclk, pclk);
+
+ return pclk < max_pclk;
+}
+
+static bool sde_hdmi_check_dc_clock(struct drm_connector *connector,
+ struct drm_display_mode *mode, u32 format)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ u32 tmds_clk_with_dc = sde_hdmi_calc_pixclk(
+ mode->clock * HDMI_KHZ_TO_HZ,
+ format,
+ true);
+
+ return (display->dc_feature_supported &&
+ sde_hdmi_validate_pixclk(connector, tmds_clk_with_dc));
+}
+
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int dc_format = 0;
+
+ if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) &&
+ (connector->display_info.edid_hdmi_dc_modes
+ & DRM_EDID_YCBCR420_DC_30))
+ if (sde_hdmi_check_dc_clock(connector, mode,
+ MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420))
+ dc_format |= MSM_MODE_FLAG_YUV420_DC_ENABLE;
+
+ if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) &&
+ (connector->display_info.edid_hdmi_dc_modes
+ & DRM_EDID_HDMI_DC_30))
+ if (sde_hdmi_check_dc_clock(connector, mode,
+ MSM_MODE_FLAG_COLOR_FORMAT_RGB444))
+ dc_format |= MSM_MODE_FLAG_RGB444_DC_ENABLE;
+
+ return dc_format;
+}
+
+u8 sde_hdmi_hdr_get_ops(u8 curr_state,
+ u8 new_state)
+{
+
+ /*
+ * There are three valid state transitions:
+ *
+ * 1. HDR_DISABLE -> HDR_ENABLE
+ *    Start sending HDR metadata, using the metadata from the HDR clip.
+ *
+ * 2. HDR_ENABLE -> HDR_ENABLE
+ *    Keep sending HDR metadata, but with EOTF and metadata set to 0.
+ *
+ * 3. HDR_ENABLE -> HDR_DISABLE
+ *    Stop sending metadata to the sink and clear the PKT_CTRL register
+ *    bits.
+ */
+
+ if ((curr_state == HDR_DISABLE)
+ && (new_state == HDR_ENABLE)) {
+ HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+ sde_hdmi_hdr_sname(curr_state),
+ sde_hdmi_hdr_sname(new_state));
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_ENABLE)
+ && (new_state == HDR_ENABLE)) {
+ HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+ sde_hdmi_hdr_sname(curr_state),
+ sde_hdmi_hdr_sname(new_state));
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_ENABLE)
+ && (new_state == HDR_DISABLE)) {
+ HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+ sde_hdmi_hdr_sname(curr_state),
+ sde_hdmi_hdr_sname(new_state));
+ return HDR_CLEAR_INFO;
+ }
+
+ HDMI_UTIL_DEBUG("Unsupported OR no state change\n");
+ return HDR_UNSUPPORTED_OP;
+}
+
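
For reference, a worked example of sde_hdmi_calc_pixclk(), illustrative only and not part of the patch, assuming a 594,000 kHz 4K60 mode sent as YUV420 with 30-bit deep color:

#include <stdio.h>

int main(void)
{
	unsigned long pixel_freq = 594000UL * 1000;	/* mode->clock * HDMI_KHZ_TO_HZ */

	pixel_freq /= 2;		/* YUV420: HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO */
	pixel_freq += pixel_freq >> 2;	/* deep color: 30 bpp vs 24 bpp is a 1.25x ratio */

	printf("%lu Hz\n", pixel_freq);	/* 371250000 */
	return 0;
}
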
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
index 8b69dd31f637..1d89ae222a7b 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
@@ -125,6 +125,17 @@ enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask {
RXSTATUS_REAUTH_REQ = BIT(14),
};
+enum sde_hdmi_hdr_state {
+ HDR_DISABLE,
+ HDR_ENABLE
+};
+
+enum sde_hdmi_hdr_op {
+ HDR_UNSUPPORTED_OP,
+ HDR_SEND_INFO,
+ HDR_CLEAR_INFO
+};
+
struct sde_hdmi_tx_hdcp2p2_ddc_data {
enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask;
u32 timeout_ms;
@@ -164,4 +175,12 @@ int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display);
void sde_hdmi_ddc_config(void *hdmi_display);
int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display);
void sde_hdmi_dump_regs(void *hdmi_display);
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+
+ unsigned long pclk);
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+u8 sde_hdmi_hdr_get_ops(u8 curr_state,
+ u8 new_state);
#endif /* _SDE_HDMI_UTIL_H_ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 924ce64206f4..c8b11425a817 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1923,8 +1923,75 @@ static struct drm_driver msm_driver = {
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_connector *conn;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct msm_drm_private *priv;
+ int ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+ SDE_EVT32(0);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_lock_all(ddev);
+ ctx = ddev->mode_config.acquire_ctx;
+
+ /* save current state for resume */
+ if (priv->suspend_state)
+ drm_atomic_state_free(priv->suspend_state);
+ priv->suspend_state = drm_atomic_helper_duplicate_state(ddev, ctx);
+ if (IS_ERR_OR_NULL(priv->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ priv->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = ctx;
+ drm_for_each_connector(conn, ddev) {
+
+ if (!conn->state || !conn->state->crtc ||
+ conn->dpms != DRM_MODE_DPMS_ON)
+ continue;
+
+ /* force CRTC to be inactive */
+ crtc_state = drm_atomic_get_crtc_state(state,
+ conn->state->crtc);
+ if (IS_ERR_OR_NULL(crtc_state)) {
+ DRM_ERROR("failed to get crtc %d state\n",
+ conn->state->crtc->base.id);
+ drm_atomic_state_free(state);
+ goto unlock;
+ }
+ crtc_state->active = false;
+ }
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_free(state);
+ }
+
+unlock:
+ drm_modeset_unlock_all(ddev);
+
+ /* disable hot-plug polling */
drm_kms_helper_poll_disable(ddev);
return 0;
@@ -1932,8 +1999,38 @@ static int msm_pm_suspend(struct device *dev)
static int msm_pm_resume(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+
+ SDE_EVT32(priv->suspend_state != NULL);
+
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ if (priv->suspend_state) {
+ priv->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(priv->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_free(priv->suspend_state);
+ }
+ priv->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+ /* enable hot-plug polling */
drm_kms_helper_poll_enable(ddev);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index ac15f399df7d..49b6029c3342 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -144,7 +144,7 @@ enum msm_mdp_conn_property {
/* blob properties, always put these first */
CONNECTOR_PROP_SDE_INFO,
CONNECTOR_PROP_HDR_INFO,
- CONNECTOR_PROP_HDR_METADATA,
+ CONNECTOR_PROP_HDR_CONTROL,
/* # of blob properties */
CONNECTOR_PROP_BLOBCOUNT,
@@ -237,10 +237,10 @@ struct msm_display_info {
/**
* struct - msm_display_kickoff_params - info for display features at kickoff
- * @hdr_metadata: HDR metadata info passed from userspace
+ * @hdr_ctrl: HDR control info passed from userspace
*/
struct msm_display_kickoff_params {
- struct drm_msm_ext_panel_hdr_metadata *hdr_metadata;
+ struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
};
/**
@@ -367,6 +367,9 @@ struct msm_drm_private {
struct msm_vblank_ctrl vblank_ctrl;
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+
/* list of clients waiting for events */
struct list_head client_event_list;
};
@@ -414,6 +417,15 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
+static inline bool msm_is_suspend_state(struct drm_device *dev)
+{
+ if (!dev || !dev->dev_private)
+ return false;
+
+ return ((struct msm_drm_private *)dev->dev_private)->suspend_state !=
+ NULL;
+}
+
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 2ab50919f514..ed0ba928f170 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -34,6 +34,24 @@
#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0)
/* Transition to new mode requires a wait-for-vblank before the modeset */
#define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1)
+/*
+ * These flags are set in the bridge and consumed in the encoder, so the
+ * mode's private_flags is a convenient place to carry them.
+ * DRM_MODE_FLAG_SUPPORTS_RGB/YUV indicate the color formats the sink
+ * advertises in its EDID, whereas the flags below record the best color
+ * format / bit depth chosen for the display, in priority order:
+ * RGB+RGB_DC, YUV+YUV_DC, RGB, YUV. RGB and YUV can never be selected at
+ * the same time, and RGB_DC/YUV_DC may only be set together with the
+ * corresponding RGB/YUV format flag.
+ */
+/* Enable RGB444 30 bit deep color */
+#define MSM_MODE_FLAG_RGB444_DC_ENABLE (1<<2)
+/* Enable YUV420 30 bit deep color */
+#define MSM_MODE_FLAG_YUV420_DC_ENABLE (1<<3)
+/* Choose RGB444 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_RGB444 (1<<4)
+/* Choose YUV420 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 (1<<5)
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
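
A quick sanity check of the flag combinations these definitions allow; the bit values below simply mirror the ones added above, and this snippet is an editorial illustration rather than part of the patch.

#include <stdio.h>

#define MSM_MODE_FLAG_RGB444_DC_ENABLE		(1<<2)
#define MSM_MODE_FLAG_YUV420_DC_ENABLE		(1<<3)
#define MSM_MODE_FLAG_COLOR_FORMAT_RGB444	(1<<4)
#define MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420	(1<<5)

int main(void)
{
	/* deep color RGB444 selection */
	printf("0x%x\n", MSM_MODE_FLAG_COLOR_FORMAT_RGB444 |
			 MSM_MODE_FLAG_RGB444_DC_ENABLE);	/* 0x14 */
	/* deep color YUV420 selection */
	printf("0x%x\n", MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 |
			 MSM_MODE_FLAG_YUV420_DC_ENABLE);	/* 0x28 */
	return 0;
}
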
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 8148d3e9e850..cd3a710f8f27 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -46,6 +46,8 @@ struct msm_mmu_funcs {
void (*destroy)(struct msm_mmu *mmu);
void (*enable)(struct msm_mmu *mmu);
void (*disable)(struct msm_mmu *mmu);
+ int (*set_property)(struct msm_mmu *mmu,
+ enum iommu_attr attr, void *data);
};
struct msm_mmu {
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index eb68eb977aa7..4247243055b6 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -170,12 +170,36 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
kfree(smmu);
}
+/* allow callers to set an attribute on the SMMU's IOMMU domain */
+static int msm_smmu_set_property(struct msm_mmu *mmu,
+ enum iommu_attr attr, void *data)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ int ret = 0;
+
+ if (!client)
+ return -EINVAL;
+
+ domain = client->mmu_mapping->domain;
+ if (!domain)
+ return -EINVAL;
+
+ ret = iommu_domain_set_attr(domain, attr, data);
+ if (ret)
+ DRM_ERROR("set domain attribute failed\n");
+
+ return ret;
+}
+
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
.map = msm_smmu_map,
.unmap = msm_smmu_unmap,
.destroy = msm_smmu_destroy,
+ .set_property = msm_smmu_set_property,
};
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
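
A hypothetical caller-side sketch of the new set_property() hook as it might be used during splash handoff; DOMAIN_ATTR_EARLY_MAP and the surrounding error handling are assumptions for illustration and are not taken from this patch.

/* hypothetical caller, assuming the downstream DOMAIN_ATTR_EARLY_MAP attribute */
static int example_enable_early_map(struct msm_mmu *mmu, bool enable)
{
	int early_map = enable ? 1 : 0;

	if (!mmu || !mmu->funcs || !mmu->funcs->set_property)
		return -EINVAL;

	/* forwards to iommu_domain_set_attr() via msm_smmu_set_property() */
	return mmu->funcs->set_property(mmu, DOMAIN_ATTR_EARLY_MAP, &early_map);
}
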
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 76e6e4ef6e7d..6cc54d15beb2 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -83,7 +83,7 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
if (!c_conn->ops.pre_kickoff)
return 0;
- params.hdr_metadata = &c_state->hdr_meta;
+ params.hdr_ctrl = &c_state->hdr_ctrl;
rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
@@ -247,6 +247,7 @@ static int _sde_connector_set_hdr_info(
void *usr_ptr)
{
struct drm_connector *connector;
+ struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
int i;
@@ -262,21 +263,26 @@ static int _sde_connector_set_hdr_info(
return -ENOTSUPP;
}
- memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
+ memset(&c_state->hdr_ctrl, 0, sizeof(c_state->hdr_ctrl));
if (!usr_ptr) {
- SDE_DEBUG_CONN(c_conn, "hdr metadata cleared\n");
+ SDE_DEBUG_CONN(c_conn, "hdr control cleared\n");
return 0;
}
- if (copy_from_user(&c_state->hdr_meta,
+ if (copy_from_user(&c_state->hdr_ctrl,
(void __user *)usr_ptr,
- sizeof(*hdr_meta))) {
- SDE_ERROR_CONN(c_conn, "failed to copy hdr metadata\n");
+ sizeof(*hdr_ctrl))) {
+ SDE_ERROR_CONN(c_conn, "failed to copy hdr control\n");
return -EFAULT;
}
- hdr_meta = &c_state->hdr_meta;
+ hdr_ctrl = &c_state->hdr_ctrl;
+
+ SDE_DEBUG_CONN(c_conn, "hdr_state %d\n",
+ hdr_ctrl->hdr_state);
+
+ hdr_meta = &hdr_ctrl->hdr_meta;
SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n",
hdr_meta->hdr_supported);
@@ -362,7 +368,7 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
SDE_ERROR("invalid topology_control: 0x%llX\n", val);
}
- if (idx == CONNECTOR_PROP_HDR_METADATA) {
+ if (idx == CONNECTOR_PROP_HDR_CONTROL) {
rc = _sde_connector_set_hdr_info(c_conn, c_state, (void *)val);
if (rc)
SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc);
@@ -598,6 +604,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
struct sde_kms *sde_kms;
struct sde_kms_info *info;
struct sde_connector *c_conn = NULL;
+ struct sde_splash_info *sinfo;
int rc;
if (!dev || !dev->dev_private || !encoder) {
@@ -718,8 +725,8 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
}
msm_property_install_volatile_range(&c_conn->property_info,
- "hdr_metadata", 0x0, 0, ~0, 0,
- CONNECTOR_PROP_HDR_METADATA);
+ "hdr_control", 0x0, 0, ~0, 0,
+ CONNECTOR_PROP_HDR_CONTROL);
msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
@@ -751,6 +758,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
SDE_DEBUG("connector %d attach encoder %d\n",
c_conn->base.base.id, encoder->base.id);
+ sinfo = &sde_kms->splash_info;
+ if (sinfo && sinfo->handoff)
+ sde_splash_setup_connector_count(sinfo, connector_type);
+
priv->connectors[priv->num_connectors++] = &c_conn->base;
return &c_conn->base;
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 19e2b8a3e41c..8257f29bd4b8 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -221,14 +221,14 @@ struct sde_connector {
* @out_fb: Pointer to output frame buffer, if applicable
* @aspace: Address space for accessing frame buffer objects, if applicable
* @property_values: Local cache of current connector property values
- * @hdr_meta: HDR metadata info passed from userspace
+ * @hdr_ctrl: HDR control info passed from userspace
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
struct msm_gem_address_space *aspace;
uint64_t property_values[CONNECTOR_PROP_COUNT];
- struct drm_msm_ext_panel_hdr_metadata hdr_meta;
+ struct drm_msm_ext_panel_hdr_ctrl hdr_ctrl;
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 5a49a1beee49..9ab216213d8e 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -57,7 +57,17 @@
static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
{
- struct msm_drm_private *priv = crtc->dev->dev_private;
+ struct msm_drm_private *priv;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ SDE_ERROR("invalid crtc\n");
+ return NULL;
+ }
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return NULL;
+ }
return to_sde_kms(priv->kms);
}
@@ -77,10 +87,10 @@ static void sde_crtc_destroy(struct drm_crtc *crtc)
sde_cp_crtc_destroy_properties(crtc);
debugfs_remove_recursive(sde_crtc->debugfs_root);
- mutex_destroy(&sde_crtc->crtc_lock);
sde_fence_deinit(&sde_crtc->output_fence);
drm_crtc_cleanup(crtc);
+ mutex_destroy(&sde_crtc->crtc_lock);
kfree(sde_crtc);
}
@@ -590,14 +600,22 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
+ struct drm_connector *conn;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
int i;
- if (!crtc || !crtc->state) {
+ if (!crtc || !crtc->state || !crtc->dev) {
SDE_ERROR("invalid crtc\n");
return;
}
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
sde_crtc = to_sde_crtc(crtc);
+ sde_kms = _sde_crtc_get_kms(crtc);
cstate = to_sde_crtc_state(crtc->state);
SDE_EVT32(DRMID(crtc));
@@ -606,6 +624,20 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
for (i = 0; i < cstate->num_connectors; ++i)
sde_connector_complete_commit(cstate->connectors[i]);
+
+ if (!sde_kms->splash_info.handoff &&
+ sde_kms->splash_info.lk_is_exited) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(conn, crtc->dev) {
+ if (conn->state->crtc != crtc)
+ continue;
+
+ sde_splash_clean_up_free_resource(priv->kms,
+ &priv->phandle,
+ conn->connector_type);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ }
}
/**
@@ -941,6 +973,112 @@ end:
}
/**
+ * _sde_crtc_vblank_enable_nolock - update power resource and vblank request
+ * @sde_crtc: Pointer to sde crtc structure
+ * @enable: Whether to enable/disable vblanks
+ */
+static void _sde_crtc_vblank_enable_nolock(
+ struct sde_crtc *sde_crtc, bool enable)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!sde_crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = &sde_crtc->base;
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return;
+ }
+ sde_kms = to_sde_kms(priv->kms);
+
+ if (enable) {
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, true);
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+ sde_encoder_register_vblank_callback(enc,
+ sde_crtc_vblank_cb, (void *)crtc);
+ }
+ } else {
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+ sde_encoder_register_vblank_callback(enc, NULL, NULL);
+ }
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+ }
+}
+
+/**
+ * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+ struct sde_crtc *sde_crtc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+ sde_crtc = to_sde_crtc(crtc);
+ priv = crtc->dev->dev_private;
+
+ if (!priv->kms) {
+ SDE_ERROR("invalid crtc kms\n");
+ return;
+ }
+ sde_kms = to_sde_kms(priv->kms);
+
+ SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+ mutex_lock(&sde_crtc->crtc_lock);
+
+ /*
+ * Update CP on suspend/resume transitions
+ */
+ if (enable && !sde_crtc->suspend)
+ sde_cp_crtc_suspend(crtc);
+ else if (!enable && sde_crtc->suspend)
+ sde_cp_crtc_resume(crtc);
+
+ /*
+ * If the vblank refcount != 0, release a power reference on suspend
+ * and take it back during resume (if it is still != 0).
+ */
+ if (sde_crtc->suspend == enable)
+ SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+ crtc->base.id, enable);
+ else if (atomic_read(&sde_crtc->vblank_refcount) != 0)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
+
+ sde_crtc->suspend = enable;
+
+ mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/**
* sde_crtc_duplicate_state - state duplicate hook
* @crtc: Pointer to drm crtc structure
* @Returns: Pointer to new drm_crtc_state structure
@@ -990,6 +1128,10 @@ static void sde_crtc_reset(struct drm_crtc *crtc)
return;
}
+ /* revert suspend actions, if necessary */
+ if (msm_is_suspend_state(crtc->dev))
+ _sde_crtc_set_suspend(crtc, false);
+
/* remove previous state, if present */
if (crtc->state) {
sde_crtc_destroy_state(crtc, crtc->state);
@@ -1013,37 +1155,67 @@ static void sde_crtc_reset(struct drm_crtc *crtc)
crtc->state = &cstate->base;
}
+static int _sde_crtc_vblank_no_lock(struct sde_crtc *sde_crtc, bool en)
+{
+ if (!sde_crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return -EINVAL;
+ } else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
+ SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id);
+ if (!sde_crtc->suspend)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, true);
+ } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
+ SDE_ERROR("crtc%d invalid vblank disable\n",
+ sde_crtc->base.base.id);
+ return -EINVAL;
+ } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
+ SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id);
+ if (!sde_crtc->suspend)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, false);
+ } else {
+ SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
+ sde_crtc->base.base.id,
+ en ? "enable" : "disable",
+ atomic_read(&sde_crtc->vblank_refcount));
+ }
+
+ return 0;
+}
+
static void sde_crtc_disable(struct drm_crtc *crtc)
{
- struct msm_drm_private *priv;
- struct sde_crtc *sde_crtc;
struct drm_encoder *encoder;
+ struct sde_crtc *sde_crtc;
struct sde_kms *sde_kms;
+ struct msm_drm_private *priv;
- if (!crtc) {
+ if (!crtc || !crtc->dev || !crtc->state) {
SDE_ERROR("invalid crtc\n");
return;
}
sde_crtc = to_sde_crtc(crtc);
sde_kms = _sde_crtc_get_kms(crtc);
+ if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
+ SDE_ERROR("invalid kms handle\n");
+ return;
+ }
priv = sde_kms->dev->dev_private;
SDE_DEBUG("crtc%d\n", crtc->base.id);
+ if (msm_is_suspend_state(crtc->dev))
+ _sde_crtc_set_suspend(crtc, true);
+
mutex_lock(&sde_crtc->crtc_lock);
SDE_EVT32(DRMID(crtc));
- if (atomic_read(&sde_crtc->vblank_refcount)) {
+ if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
SDE_ERROR("crtc%d invalid vblank refcount\n",
crtc->base.id);
- SDE_EVT32(DRMID(crtc));
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
- sde_encoder_register_vblank_callback(encoder, NULL,
- NULL);
- }
- atomic_set(&sde_crtc->vblank_refcount, 0);
+ SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount));
+ while (atomic_read(&sde_crtc->vblank_refcount))
+ if (_sde_crtc_vblank_no_lock(sde_crtc, false))
+ break;
}
if (atomic_read(&sde_crtc->frame_pending)) {
@@ -1262,40 +1434,20 @@ end:
int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
{
- struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
- struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
+ struct sde_crtc *sde_crtc;
+ int rc;
- if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
- SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
- } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
- SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
return -EINVAL;
- } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
- SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
- } else {
- SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
- crtc->base.id,
- en ? "enable" : "disable",
- atomic_read(&sde_crtc->vblank_refcount));
- return 0;
}
+ sde_crtc = to_sde_crtc(crtc);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- SDE_EVT32(DRMID(crtc), en);
-
- if (en)
- sde_encoder_register_vblank_callback(encoder,
- sde_crtc_vblank_cb, (void *)crtc);
- else
- sde_encoder_register_vblank_callback(encoder, NULL,
- NULL);
- }
+ mutex_lock(&sde_crtc->crtc_lock);
+ rc = _sde_crtc_vblank_no_lock(sde_crtc, en);
+ mutex_unlock(&sde_crtc->crtc_lock);
- return 0;
+ return rc;
}
void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc,
@@ -1385,6 +1537,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
+ sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr);
if (catalog->perf.max_bw_low)
sde_kms_info_add_keyint(info, "max_bandwidth_low",
catalog->perf.max_bw_low);
@@ -1738,6 +1891,7 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev,
crtc->dev = dev;
atomic_set(&sde_crtc->vblank_refcount, 0);
+ mutex_init(&sde_crtc->crtc_lock);
spin_lock_init(&sde_crtc->spin_lock);
atomic_set(&sde_crtc->frame_pending, 0);
@@ -1759,7 +1913,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev,
snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
/* initialize output fence support */
- mutex_init(&sde_crtc->crtc_lock);
sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
/* initialize debugfs support */
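
A small userspace model of the refcounted vblank request logic introduced in _sde_crtc_vblank_no_lock(), illustrative only; the printf calls stand in for the encoder callback registration and power votes done by the driver.

#include <stdio.h>
#include <stdbool.h>

static int refcount;
static bool suspended;

static void vblank_request(bool en)
{
	if (en && ++refcount == 1) {
		if (!suspended)
			printf("enable vblank\n");	/* 0 -> 1 transition */
	} else if (!en && refcount < 1) {
		printf("invalid disable\n");		/* underflow rejected */
	} else if (!en && --refcount == 0) {
		if (!suspended)
			printf("disable vblank\n");	/* 1 -> 0 transition */
	}
}

int main(void)
{
	vblank_request(true);	/* enable vblank  */
	vblank_request(true);	/* refcount only  */
	vblank_request(false);	/* refcount only  */
	vblank_request(false);	/* disable vblank */
	return 0;
}
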
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index aaa815c76c4e..6b8483d574b1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -82,6 +82,7 @@ struct sde_crtc_frame_event {
* @vblank_cb_count : count of vblank callback since last reset
* @vblank_cb_time : ktime at vblank count reset
* @vblank_refcount : reference count for vblank enable request
+ * @suspend : whether or not a suspend operation is in progress
* @feature_list : list of color processing features supported on a crtc
* @active_list : list of color processing features are active
* @dirty_list : list of color processing features are dirty
@@ -117,6 +118,7 @@ struct sde_crtc {
u32 vblank_cb_count;
ktime_t vblank_cb_time;
atomic_t vblank_refcount;
+ bool suspend;
struct list_head feature_list;
struct list_head active_list;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 29444a83cf02..97c9f8baea6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -55,6 +55,33 @@
#define MAX_CHANNELS_PER_ENC 2
+/* rgb to yuv color space conversion matrix */
+static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = {
+ [SDE_CSC_RGB2YUV_601L] = {
+ {
+ TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
+ TO_S15D16(0xffb4), TO_S15D16(0xff6b), TO_S15D16(0x00e1),
+ TO_S15D16(0x00e1), TO_S15D16(0xff44), TO_S15D16(0xffdb),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+
+ [SDE_CSC_RGB2YUV_601FR] = {
+ {
+ TO_S15D16(0x0099), TO_S15D16(0x012d), TO_S15D16(0x003a),
+ TO_S15D16(0xffaa), TO_S15D16(0xff56), TO_S15D16(0x0100),
+ TO_S15D16(0x0100), TO_S15D16(0xff2a), TO_S15D16(0xffd6),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0000, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+};
+
/**
* struct sde_encoder_virt - virtual encoder. Container of one or more physical
* encoders. Virtual encoder manages one "logical" display. Physical
@@ -1374,3 +1401,108 @@ enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
return INTF_MODE_NONE;
}
+
+/**
+ * sde_encoder_phys_setup_cdm - setup chroma down block
+ * @phys_enc: Pointer to physical encoder
+ * @output_type: HDMI/WB
+ * @format: Output format
+ * @roi: Output size
+ */
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+ const struct sde_format *format, u32 output_type,
+ struct sde_rect *roi)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct sde_encoder_virt *sde_enc = NULL;
+ struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
+ struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
+ int ret;
+ u32 csc_type = 0;
+
+ if (!encoder) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+ sde_enc = to_sde_encoder_virt(encoder);
+
+ if (!SDE_FORMAT_IS_YUV(format)) {
+ SDE_DEBUG_ENC(sde_enc, "[cdm_disable fmt:%x]\n",
+ format->base.pixel_format);
+
+ if (hw_cdm && hw_cdm->ops.disable)
+ hw_cdm->ops.disable(hw_cdm);
+
+ return;
+ }
+
+ memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
+
+ cdm_cfg->output_width = roi->w;
+ cdm_cfg->output_height = roi->h;
+ cdm_cfg->output_fmt = format;
+ cdm_cfg->output_type = output_type;
+ cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
+ CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+
+ /* enable 10 bit logic */
+ switch (cdm_cfg->output_fmt->chroma_sample) {
+ case SDE_CHROMA_RGB:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case SDE_CHROMA_H2V1:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case SDE_CHROMA_420:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+ break;
+ case SDE_CHROMA_H1V2:
+ default:
+ SDE_ERROR("unsupported chroma sampling type\n");
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ }
+
+ SDE_DEBUG_ENC(sde_enc, "[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+ cdm_cfg->output_width,
+ cdm_cfg->output_height,
+ cdm_cfg->output_fmt->base.pixel_format,
+ cdm_cfg->output_type,
+ cdm_cfg->output_bit_depth,
+ cdm_cfg->h_cdwn_type,
+ cdm_cfg->v_cdwn_type);
+
+ if (output_type == CDM_CDWN_OUTPUT_HDMI)
+ csc_type = SDE_CSC_RGB2YUV_601FR;
+ else if (output_type == CDM_CDWN_OUTPUT_WB)
+ csc_type = SDE_CSC_RGB2YUV_601L;
+
+ if (hw_cdm && hw_cdm->ops.setup_csc_data) {
+ ret = hw_cdm->ops.setup_csc_data(hw_cdm,
+ &sde_csc_10bit_convert[csc_type]);
+ if (ret < 0) {
+ SDE_ERROR("failed to setup CSC %d\n", ret);
+ return;
+ }
+ }
+
+ if (hw_cdm && hw_cdm->ops.setup_cdwn) {
+ ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
+ if (ret < 0) {
+ SDE_ERROR("failed to setup CDM %d\n", ret);
+ return;
+ }
+ }
+
+ if (hw_cdm && hw_cdm->ops.enable) {
+ ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+ if (ret < 0) {
+ SDE_ERROR("failed to enable CDM %d\n", ret);
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 2205dd98a927..20f125155de3 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -349,8 +349,8 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
#endif
void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
- struct drm_framebuffer *fb, const struct sde_format *format,
- struct sde_rect *wb_roi);
+ const struct sde_format *format, u32 output_type,
+ struct sde_rect *roi);
/**
* sde_encoder_helper_trigger_start - control start helper function
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 0b6ee302e231..78a8b732b0de 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -242,17 +242,20 @@ static void sde_encoder_phys_vid_setup_timing_engine(
SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
- if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ if (phys_enc->split_role != ENC_ROLE_SOLO ||
+ (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) {
mode.hdisplay >>= 1;
mode.htotal >>= 1;
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
+ mode.hskew >>= 1;
SDE_DEBUG_VIDENC(vid_enc,
- "split_role %d, halve horizontal %d %d %d %d\n",
+ "split_role %d, halve horizontal %d %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
- mode.hsync_start, mode.hsync_end);
+ mode.hsync_start, mode.hsync_end,
+ mode.hskew);
}
drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
@@ -407,6 +410,9 @@ static void sde_encoder_phys_vid_mode_set(
return;
}
+ phys_enc->hw_ctl = NULL;
+ phys_enc->hw_cdm = NULL;
+
rm = &phys_enc->sde_kms->rm;
vid_enc = to_sde_encoder_phys_vid(phys_enc);
phys_enc->cached_mode = *adj_mode;
@@ -427,6 +433,20 @@ static void sde_encoder_phys_vid_mode_set(
phys_enc->hw_ctl = NULL;
return;
}
+
+ /* CDM is optional */
+ sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
+ for (i = 0; i <= instance; i++) {
+ sde_rm_get_hw(rm, &iter);
+ if (i == instance)
+ phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
+ }
+
+ if (IS_ERR(phys_enc->hw_cdm)) {
+ SDE_ERROR("CDM required but not allocated: %ld\n",
+ PTR_ERR(phys_enc->hw_cdm));
+ phys_enc->hw_cdm = NULL;
+ }
}
static int sde_encoder_phys_vid_control_vblank_irq(
@@ -477,6 +497,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
struct sde_encoder_phys_vid *vid_enc;
struct sde_hw_intf *intf;
struct sde_hw_ctl *ctl;
+ struct sde_hw_cdm *hw_cdm = NULL;
+ struct drm_display_mode mode;
+ const struct sde_format *fmt = NULL;
u32 flush_mask = 0;
int ret;
@@ -485,7 +508,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
SDE_ERROR("invalid encoder/device\n");
return;
}
+ hw_cdm = phys_enc->hw_cdm;
priv = phys_enc->parent->dev->dev_private;
+ mode = phys_enc->cached_mode;
vid_enc = to_sde_encoder_phys_vid(phys_enc);
intf = vid_enc->hw_intf;
@@ -520,7 +545,21 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
goto end;
}
+ if (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ fmt = sde_get_sde_format(DRM_FORMAT_YUV420);
+
+ if (fmt) {
+ struct sde_rect hdmi_roi;
+
+ hdmi_roi.w = mode.hdisplay;
+ hdmi_roi.h = mode.vdisplay;
+ sde_encoder_phys_setup_cdm(phys_enc, fmt,
+ CDM_CDWN_OUTPUT_HDMI, &hdmi_roi);
+ }
+
ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ if (ctl->ops.get_bitmask_cdm && hw_cdm)
+ ctl->ops.get_bitmask_cdm(ctl, &flush_mask, hw_cdm->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
@@ -569,6 +608,8 @@ static void sde_encoder_phys_vid_get_hw_resources(
SDE_DEBUG_VIDENC(vid_enc, "\n");
hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+ hw_res->needs_cdm = true;
+ SDE_DEBUG_DRIVER("[vid] needs_cdm=%d\n", hw_res->needs_cdm);
}
static int sde_encoder_phys_vid_wait_for_vblank(
@@ -713,6 +754,11 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
atomic_read(&phys_enc->vblank_refcount));
+ if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
+ SDE_DEBUG_DRIVER("[cdm_disable]\n");
+ phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+ }
+
phys_enc->enable_state = SDE_ENC_DISABLED;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index a48962a2384b..65b16419fcec 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -28,24 +28,6 @@
#define WBID(wb_enc) ((wb_enc) ? wb_enc->wb_dev->wb_idx : -1)
-#define TO_S15D16(_x_) ((_x_) << 7)
-
-/**
- * sde_rgb2yuv_601l - rgb to yuv color space conversion matrix
- *
- */
-static struct sde_csc_cfg sde_encoder_phys_wb_rgb2yuv_601l = {
- {
- TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
- TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1),
- TO_S15D16(0x00e1), TO_S15D16(0x1f45), TO_S15D16(0x1fdc)
- },
- { 0x00, 0x00, 0x00 },
- { 0x0040, 0x0200, 0x0200 },
- { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
- { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
/**
* sde_encoder_phys_wb_is_master - report wb always as master encoder
*/
@@ -105,96 +87,6 @@ static void sde_encoder_phys_wb_set_traffic_shaper(
}
/**
- * sde_encoder_phys_setup_cdm - setup chroma down block
- * @phys_enc: Pointer to physical encoder
- * @fb: Pointer to output framebuffer
- * @format: Output format
- */
-void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
- struct drm_framebuffer *fb, const struct sde_format *format,
- struct sde_rect *wb_roi)
-{
- struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
- struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
- int ret;
-
- if (!SDE_FORMAT_IS_YUV(format)) {
- SDE_DEBUG("[cdm_disable fmt:%x]\n",
- format->base.pixel_format);
-
- if (hw_cdm && hw_cdm->ops.disable)
- hw_cdm->ops.disable(hw_cdm);
-
- return;
- }
-
- memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
-
- cdm_cfg->output_width = wb_roi->w;
- cdm_cfg->output_height = wb_roi->h;
- cdm_cfg->output_fmt = format;
- cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
- cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
- CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
-
- /* enable 10 bit logic */
- switch (cdm_cfg->output_fmt->chroma_sample) {
- case SDE_CHROMA_RGB:
- cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- case SDE_CHROMA_H2V1:
- cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- case SDE_CHROMA_420:
- cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
- break;
- case SDE_CHROMA_H1V2:
- default:
- SDE_ERROR("unsupported chroma sampling type\n");
- cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- }
-
- SDE_DEBUG("[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
- cdm_cfg->output_width,
- cdm_cfg->output_height,
- cdm_cfg->output_fmt->base.pixel_format,
- cdm_cfg->output_type,
- cdm_cfg->output_bit_depth,
- cdm_cfg->h_cdwn_type,
- cdm_cfg->v_cdwn_type);
-
- if (hw_cdm && hw_cdm->ops.setup_csc_data) {
- ret = hw_cdm->ops.setup_csc_data(hw_cdm,
- &sde_encoder_phys_wb_rgb2yuv_601l);
- if (ret < 0) {
- SDE_ERROR("failed to setup CSC %d\n", ret);
- return;
- }
- }
-
- if (hw_cdm && hw_cdm->ops.setup_cdwn) {
- ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
- if (ret < 0) {
- SDE_ERROR("failed to setup CDM %d\n", ret);
- return;
- }
- }
-
- if (hw_cdm && hw_cdm->ops.enable) {
- ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
- if (ret < 0) {
- SDE_ERROR("failed to enable CDM %d\n", ret);
- return;
- }
- }
-}
-
-/**
* sde_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
* @fb: Pointer to output framebuffer
@@ -520,7 +412,8 @@ static void sde_encoder_phys_wb_setup(
sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
- sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);
+ sde_encoder_phys_setup_cdm(phys_enc, wb_enc->wb_fmt,
+ CDM_CDWN_OUTPUT_WB, wb_roi);
sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index eb398fbee816..a185eb338134 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -689,6 +689,7 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
{
sblk->maxupscale = MAX_SSPP_UPSCALE;
sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sblk->format_list = plane_formats_yuv;
sspp->id = SSPP_VIG0 + *vig_count;
sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
sspp->type = SSPP_TYPE_VIG;
@@ -759,6 +760,7 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
{
sblk->maxupscale = MAX_SSPP_UPSCALE;
sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sblk->format_list = plane_formats;
sspp->id = SSPP_RGB0 + *rgb_count;
sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
sspp->type = SSPP_TYPE_RGB;
@@ -799,6 +801,7 @@ static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
set_bit(SDE_SSPP_CURSOR, &sspp->features);
sblk->maxupscale = SSPP_UNITY_SCALE;
sblk->maxdwnscale = SSPP_UNITY_SCALE;
+ sblk->format_list = cursor_formats;
sspp->id = SSPP_CURSOR0 + *cursor_count;
sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
sspp->type = SSPP_TYPE_CURSOR;
@@ -812,6 +815,7 @@ static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
{
sblk->maxupscale = SSPP_UNITY_SCALE;
sblk->maxdwnscale = SSPP_UNITY_SCALE;
+ sblk->format_list = plane_formats;
sspp->id = SSPP_DMA0 + *dma_count;
sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
sspp->type = SSPP_TYPE_DMA;
@@ -1291,6 +1295,7 @@ static int sde_wb_parse_dt(struct device_node *np,
wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
wb->vbif_idx = VBIF_NRT;
wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
+ wb->format_list = wb2_formats;
if (!prop_exists[WB_LEN])
wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
sblk->maxlinewidth = sde_cfg->max_wb_linewidth;
@@ -2075,6 +2080,11 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
goto end;
}
+ if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) ||
+ IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301)) {
+ sde_cfg->has_hdr = true;
+ }
+
index = _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
0, plane_formats, ARRAY_SIZE(plane_formats));
index += _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 01204df48871..73bb77b7afa6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -655,6 +655,7 @@ struct sde_vp_cfg {
* @csc_type csc or csc_10bit support.
* @has_src_split source split feature status
* @has_cdp Client driver prefetch feature status
+ * @has_hdr HDR feature support
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
@@ -672,7 +673,7 @@ struct sde_mdss_cfg {
u32 csc_type;
bool has_src_split;
bool has_cdp;
-
+ bool has_hdr;
u32 mdss_count;
struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index 188649d946d6..9ec81c227e60 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -227,7 +227,7 @@ int sde_hw_cdm_enable(struct sde_hw_cdm *ctx,
return -EINVAL;
if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
- if (fmt->chroma_sample != SDE_CHROMA_H1V2)
+ if (fmt->chroma_sample == SDE_CHROMA_H1V2)
return -EINVAL; /*unsupported format */
opmode = BIT(0);
opmode |= (fmt->chroma_sample << 1);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 2592fe26cb38..1edeff6a7aec 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -63,6 +63,8 @@ enum sde_format_flags {
(((X)->fetch_mode == SDE_FETCH_UBWC) && \
test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define TO_S15D16(_x_) ((_x_) << 7)
+
#define SDE_BLEND_FG_ALPHA_FG_CONST (0 << 0)
#define SDE_BLEND_FG_ALPHA_BG_CONST (1 << 0)
#define SDE_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
@@ -339,6 +341,12 @@ enum sde_3d_blend_mode {
BLEND_3D_MAX
};
+enum sde_csc_type {
+ SDE_CSC_RGB2YUV_601L,
+ SDE_CSC_RGB2YUV_601FR,
+ SDE_MAX_CSC
+};
+
/** struct sde_format - defines the format configuration which
* allows SDE HW to correctly fetch and decode the format
* @base: base msm_format struture containing fourcc code
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 031493aa42b8..705ec2d0dfa2 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -328,24 +328,12 @@ static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
- struct sde_kms *sde_kms = to_sde_kms(kms);
- struct drm_device *dev = sde_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
-
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
-
return sde_crtc_vblank(crtc, true);
}
static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
- struct sde_kms *sde_kms = to_sde_kms(kms);
- struct drm_device *dev = sde_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
-
sde_crtc_vblank(crtc, false);
-
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
}
static void sde_kms_prepare_commit(struct msm_kms *kms,
@@ -355,6 +343,9 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_device *dev = sde_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
+ if (sde_kms->splash_info.handoff)
+ sde_splash_clean_up_exit_lk(kms);
+
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
}
@@ -997,8 +988,15 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
sde_hw_catalog_deinit(sde_kms->catalog);
sde_kms->catalog = NULL;
+ if (sde_kms->splash_info.handoff) {
+ if (sde_kms->core_client)
+ sde_splash_destroy(&sde_kms->splash_info,
+ &priv->phandle, sde_kms->core_client);
+ }
+
if (sde_kms->core_client)
- sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
+ sde_power_client_destroy(&priv->phandle,
+ sde_kms->core_client);
sde_kms->core_client = NULL;
if (sde_kms->vbif[VBIF_NRT])
@@ -1110,6 +1108,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
continue;
}
+ /* Attaching the SMMU makes the IOMMU HW start working immediately.
+ * However, the display HW programmed by LK is still accessing memory
+ * and the splash memory map has not been created yet.
+ * So first set the DOMAIN_ATTR_EARLY_MAP attribute to 1 to bypass
+ * stage 1 translation in the IOMMU HW.
+ */
+ if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+ sde_kms->splash_info.handoff) {
+ ret = mmu->funcs->set_property(mmu,
+ DOMAIN_ATTR_EARLY_MAP,
+ &sde_kms->splash_info.handoff);
+ if (ret) {
+ SDE_ERROR("failed to set map att: %d\n", ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+ }
+
aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
mmu, "sde");
if (IS_ERR(aspace)) {
@@ -1127,6 +1143,19 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
goto fail;
}
+ /*
+ * It's now safe to map the physical memory block LK accesses.
+ */
+ if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+ sde_kms->splash_info.handoff) {
+ ret = sde_splash_smmu_map(sde_kms->dev, mmu,
+ &sde_kms->splash_info);
+ if (ret) {
+ SDE_ERROR("map rsv mem failed: %d\n", ret);
+ msm_gem_address_space_put(aspace);
+ goto fail;
+ }
+ }
}
return 0;
@@ -1141,6 +1170,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
struct sde_kms *sde_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
+ struct sde_splash_info *sinfo;
int i, rc = -EINVAL;
if (!kms) {
@@ -1230,6 +1260,33 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
+ rc = sde_splash_parse_dt(dev);
+ if (rc) {
+ SDE_ERROR("parse dt for splash info failed: %d\n", rc);
+ goto power_error;
+ }
+
+ /*
+ * Read the DISP_INTF_SEL register to check
+ * whether early display is enabled in LK.
+ */
+ rc = sde_splash_get_handoff_status(kms);
+ if (rc) {
+ SDE_ERROR("get early splash status failed: %d\n", rc);
+ goto power_error;
+ }
+
+ /*
+ * When LK has enabled early display, sde_splash_init() must be
+ * called first. It votes for bandwidth because the display
+ * hardware is still accessing the AHB data bus (otherwise the
+ * device will reboot), and then checks that the splash memory
+ * is reserved.
+ */
+ sinfo = &sde_kms->splash_info;
+ if (sinfo->handoff)
+ sde_splash_init(&priv->phandle, kms);
+
for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
u32 vbif_idx = sde_kms->catalog->vbif[i].id;
@@ -1304,7 +1361,10 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+ if (!sde_kms->splash_info.handoff)
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+
return 0;
drm_obj_init_err:
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 44f6be959ac9..d929e48a3fe8 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -34,6 +34,7 @@
#include "sde_power_handle.h"
#include "sde_irq.h"
#include "sde_core_perf.h"
+#include "sde_splash.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -157,6 +158,9 @@ struct sde_kms {
bool has_danger_ctrl;
void **hdmi_displays;
int hdmi_display_count;
+
+ /* splash handoff structure */
+ struct sde_splash_info splash_info;
};
struct vsync_info {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 6fe1d1629d22..6e2ccfa8e428 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -34,6 +34,14 @@
#include "sde_plane.h"
#include "sde_color_processing.h"
+static bool suspend_blank = true;
+module_param(suspend_blank, bool, 0400);
+MODULE_PARM_DESC(suspend_blank,
+ "If set, active planes will force their outputs to black,\n"
+ "by temporarily enabling the color fill, when recovering\n"
+ "from a system resume instead of attempting to display the\n"
+ "last provided frame buffer.");
+
#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
@@ -138,6 +146,7 @@ struct sde_plane {
struct sde_debugfs_regset32 debugfs_src;
struct sde_debugfs_regset32 debugfs_scaler;
struct sde_debugfs_regset32 debugfs_csc;
+ bool debugfs_default_scale;
};
#define to_sde_plane(x) container_of(x, struct sde_plane, base)
@@ -686,9 +695,20 @@ static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp,
struct sde_plane_state *pstate)
{
struct sde_plane *psde = pp->sde_plane;
- struct sde_hw_scaler3_cfg *cfg = pp->scaler3_cfg;
+ struct sde_hw_scaler3_cfg *cfg;
int ret = 0;
+ if (!pp || !pp->scaler3_cfg) {
+ SDE_ERROR("invalid args\n");
+ return -EINVAL;
+ } else if (!pstate) {
+ /* pstate is expected to be null on forced color fill */
+ SDE_DEBUG("null pstate\n");
+ return -EINVAL;
+ }
+
+ cfg = pp->scaler3_cfg;
+
cfg->dir_lut = msm_property_get_blob(
&psde->property_info,
pstate->property_blobs, &cfg->dir_len,
@@ -723,6 +743,7 @@ static void _sde_plane_setup_scaler3(struct sde_phy_plane *pp,
}
memset(scale_cfg, 0, sizeof(*scale_cfg));
+ memset(&pp->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
decimated = DECIMATED_DIMENSION(src_w,
pp->pipe_cfg.horz_decimation);
@@ -1070,7 +1091,8 @@ static void _sde_plane_setup_scaler(struct sde_phy_plane *pp,
int error;
error = _sde_plane_setup_scaler3_lut(pp, pstate);
- if (error || !pp->pixel_ext_usr) {
+ if (error || !pp->pixel_ext_usr ||
+ psde->debugfs_default_scale) {
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
/* calculate default config for QSEED3 */
_sde_plane_setup_scaler3(pp,
@@ -1081,7 +1103,8 @@ static void _sde_plane_setup_scaler(struct sde_phy_plane *pp,
pp->scaler3_cfg, fmt,
chroma_subsmpl_h, chroma_subsmpl_v);
}
- } else if (!pp->pixel_ext_usr) {
+ } else if (!pp->pixel_ext_usr || !pstate ||
+ psde->debugfs_default_scale) {
uint32_t deci_dim, i;
/* calculate default configuration for QSEED2 */
@@ -1701,8 +1724,8 @@ void sde_plane_flush(struct drm_plane *plane)
*/
list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
if (psde->is_error)
- /* force white frame with 0% alpha pipe output on error */
- _sde_plane_color_fill(pp, 0xFFFFFF, 0x0);
+ /* force white frame with 100% alpha pipe output on error */
+ _sde_plane_color_fill(pp, 0xFFFFFF, 0xFF);
else if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
/* force 100% alpha */
_sde_plane_color_fill(pp, pp->color_fill, 0xFF);
@@ -1711,6 +1734,10 @@ void sde_plane_flush(struct drm_plane *plane)
pp->pipe_hw->ops.setup_csc(pp->pipe_hw, pp->csc_ptr);
}
+ /* force black color fill during suspend */
+ if (msm_is_suspend_state(plane->dev) && suspend_blank)
+ _sde_plane_color_fill(pp, 0x0, 0x0);
+
/* flag h/w flush complete */
if (plane->state)
to_sde_plane_state(plane->state)->pending = false;
@@ -2582,6 +2609,10 @@ static void _sde_plane_init_debugfs(struct sde_plane *psde,
sde_debugfs_create_regset32("scaler_blk", S_IRUGO,
psde->debugfs_root,
&psde->debugfs_scaler);
+ debugfs_create_bool("default_scaling",
+ 0644,
+ psde->debugfs_root,
+ &psde->debugfs_default_scale);
sde_debugfs_setup_regset32(&psde->debugfs_csc,
sblk->csc_blk.base + cfg->base,
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
new file mode 100644
index 000000000000..79989f3ac1e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_catalog.h"
+
+#define MDP_SSPP_TOP0_OFF 0x1000
+#define DISP_INTF_SEL 0x004
+#define SPLIT_DISPLAY_EN 0x2F4
+
+/* scratch registers */
+#define SCRATCH_REGISTER_0 0x014
+#define SCRATCH_REGISTER_1 0x018
+#define SCRATCH_REGISTER_2 0x01C
+
+#define SDE_LK_RUNNING_VALUE 0xC001CAFE
+#define SDE_LK_SHUT_DOWN_VALUE 0xDEADDEAD
+#define SDE_LK_EXIT_VALUE 0xDEADBEEF
+
+#define SDE_LK_EXIT_MAX_LOOP 20
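+
+/*
+ * Handshake implied by the values above: while LK owns the display it
+ * keeps SDE_LK_RUNNING_VALUE in SCRATCH_REGISTER_1; the kernel requests
+ * shutdown by writing SDE_LK_SHUT_DOWN_VALUE, then polls (up to
+ * SDE_LK_EXIT_MAX_LOOP iterations, ~20ms apart) until LK replaces it
+ * with SDE_LK_EXIT_VALUE.
+ */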
+/*
+ * Memory reserved at boot cannot be returned through the __init
+ * free functions at this point, so release it ourselves with
+ * free_reserved_page().
+ */
+static void _sde_splash_free_bootup_memory_to_system(phys_addr_t phys,
+ size_t size)
+{
+ unsigned long pfn_start, pfn_end, pfn_idx;
+
+ memblock_free(phys, size);
+
+ pfn_start = phys >> PAGE_SHIFT;
+ pfn_end = (phys + size) >> PAGE_SHIFT;
+
+ for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+ free_reserved_page(pfn_to_page(pfn_idx));
+}
+
+static int _sde_splash_parse_dt_get_lk_pool_node(struct drm_device *dev,
+ struct sde_splash_info *sinfo)
+{
+ struct device_node *parent, *node;
+ struct resource r;
+ int ret = 0;
+
+ if (!sinfo)
+ return -EINVAL;
+
+ parent = of_find_node_by_path("/reserved-memory");
+ if (!parent)
+ return -EINVAL;
+
+ node = of_find_node_by_name(parent, "lk_pool");
+ if (!node) {
+ SDE_ERROR("mem reservation for lk_pool is not presented\n");
+ ret = -EINVAL;
+ goto parent_node_err;
+ }
+
+ /* get the memory region of the lk_pool node */
+ if (of_address_to_resource(node, 0, &r)) {
+ ret = -EINVAL;
+ goto child_node_err;
+ }
+
+ sinfo->lk_pool_paddr = (dma_addr_t)r.start;
+ sinfo->lk_pool_size = r.end - r.start;
+
+ DRM_INFO("lk_pool: addr:%pK, size:%pK\n",
+ (void *)sinfo->lk_pool_paddr,
+ (void *)sinfo->lk_pool_size);
+
+child_node_err:
+ of_node_put(node);
+
+parent_node_err:
+ of_node_put(parent);
+
+ return ret;
+}
+
+static int _sde_splash_parse_dt_get_display_node(struct drm_device *dev,
+ struct sde_splash_info *sinfo)
+{
+ unsigned long size = 0;
+ dma_addr_t start;
+ struct device_node *node;
+ int ret = 0, i = 0, len = 0;
+
+ /* get reserved memory for display module */
+ if (of_get_property(dev->dev->of_node, "contiguous-region", &len))
+ sinfo->splash_mem_num = len / sizeof(u32);
+ else
+ sinfo->splash_mem_num = 0;
+
+ sinfo->splash_mem_paddr =
+ kmalloc(sizeof(phys_addr_t) * sinfo->splash_mem_num,
+ GFP_KERNEL);
+ if (!sinfo->splash_mem_paddr) {
+ SDE_ERROR("alloc splash_mem_paddr failed\n");
+ return -ENOMEM;
+ }
+
+ sinfo->splash_mem_size =
+ kmalloc(sizeof(size_t) * sinfo->splash_mem_num,
+ GFP_KERNEL);
+ if (!sinfo->splash_mem_size) {
+ SDE_ERROR("alloc splash_mem_size failed\n");
+ goto error;
+ }
+
+ sinfo->obj = kmalloc(sizeof(struct drm_gem_object *) *
+ sinfo->splash_mem_num, GFP_KERNEL);
+ if (!sinfo->obj) {
+ SDE_ERROR("construct splash gem objects failed\n");
+ goto error;
+ }
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ node = of_parse_phandle(dev->dev->of_node,
+ "contiguous-region", i);
+
+ if (node) {
+ struct resource r;
+
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+
+ size = r.end - r.start;
+ start = (dma_addr_t)r.start;
+
+ sinfo->splash_mem_paddr[i] = start;
+ sinfo->splash_mem_size[i] = size;
+
+ DRM_INFO("blk: %d, addr:%pK, size:%pK\n",
+ i, (void *)sinfo->splash_mem_paddr[i],
+ (void *)sinfo->splash_mem_size[i]);
+
+ of_node_put(node);
+ }
+ }
+
+ return ret;
+
+error:
+ kfree(sinfo->splash_mem_paddr);
+ sinfo->splash_mem_paddr = NULL;
+
+ kfree(sinfo->splash_mem_size);
+ sinfo->splash_mem_size = NULL;
+
+ return -ENOMEM;
+}
+
+static bool _sde_splash_lk_check(struct sde_hw_intr *intr)
+{
+ return (SDE_LK_RUNNING_VALUE == SDE_REG_READ(&intr->hw,
+ SCRATCH_REGISTER_1)) ? true : false;
+}
+
+/**
+ * _sde_splash_notify_lk_exit.
+ *
+ * Tell LK to shut down and wait until it acknowledges the exit.
+ */
+static void _sde_splash_notify_lk_exit(struct sde_hw_intr *intr)
+{
+ int i = 0;
+
+ /* first, write the exit signal to the scratch register */
+ SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE);
+
+ while ((SDE_LK_EXIT_VALUE !=
+ SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) &&
+ (++i < SDE_LK_EXIT_MAX_LOOP)) {
+ DRM_INFO("wait for LK's exit");
+ msleep(20);
+ }
+
+ if (i == SDE_LK_EXIT_MAX_LOOP)
+ SDE_ERROR("Loop LK's exit failed\n");
+}
+
+static int _sde_splash_gem_new(struct drm_device *dev,
+ struct sde_splash_info *sinfo)
+{
+ int i, ret;
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ sinfo->obj[i] = msm_gem_new(dev,
+ sinfo->splash_mem_size[i], MSM_BO_UNCACHED);
+
+ if (IS_ERR(sinfo->obj[i])) {
+ ret = PTR_ERR(sinfo->obj[i]);
+ SDE_ERROR("failed to allocate gem, ret=%d\n", ret);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ if (sinfo->obj[i])
+ msm_gem_free_object(sinfo->obj[i]);
+ sinfo->obj[i] = NULL;
+ }
+
+ return ret;
+}
+
+static int _sde_splash_get_pages(struct drm_gem_object *obj, phys_addr_t phys)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **p;
+ dma_addr_t paddr;
+ int npages = obj->size >> PAGE_SHIFT;
+ int i;
+
+ p = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!p)
+ return -ENOMEM;
+
+ paddr = phys;
+
+ for (i = 0; i < npages; i++) {
+ p[i] = phys_to_page(paddr);
+ paddr += PAGE_SIZE;
+ }
+
+ msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ if (IS_ERR(msm_obj->sgt)) {
+ SDE_ERROR("failed to allocate sgt\n");
+ return -ENOMEM;
+ }
+
+ msm_obj->pages = p;
+
+ return 0;
+}
+
+static void _sde_splash_destroy_gem_object(struct msm_gem_object *msm_obj)
+{
+ if (msm_obj->pages) {
+ sg_free_table(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+ drm_free_large(msm_obj->pages);
+ msm_obj->pages = NULL;
+ }
+}
+
+static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
+{
+ kfree(sinfo->splash_mem_paddr);
+ sinfo->splash_mem_paddr = NULL;
+
+ kfree(sinfo->splash_mem_size);
+ sinfo->splash_mem_size = NULL;
+}
+
+static int _sde_splash_free_resource(struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo, enum splash_connector_type conn)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(sinfo->obj[conn]);
+
+ if (!msm_obj)
+ return -EINVAL;
+
+ if (mmu->funcs && mmu->funcs->unmap)
+ mmu->funcs->unmap(mmu, sinfo->splash_mem_paddr[conn],
+ msm_obj->sgt, NULL);
+
+ _sde_splash_free_bootup_memory_to_system(sinfo->splash_mem_paddr[conn],
+ sinfo->splash_mem_size[conn]);
+
+ _sde_splash_destroy_gem_object(msm_obj);
+
+ return 0;
+}
+
+__ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ int i = 0;
+
+ if (!phandle || !kms) {
+ SDE_ERROR("invalid phandle/kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ sinfo = &sde_kms->splash_info;
+
+ sinfo->dsi_connector_cnt = 0;
+ sinfo->hdmi_connector_cnt = 0;
+
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, true);
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) {
+ SDE_ERROR("failed to reserve memory\n");
+
+ /* withdraw the vote when failed. */
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, false);
+
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient)
+{
+ struct msm_gem_object *msm_obj;
+ int i = 0;
+
+ if (!sinfo || !phandle || !pclient) {
+ SDE_ERROR("invalid sde_kms/phandle/pclient\n");
+ return;
+ }
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
+
+ if (msm_obj)
+ _sde_splash_destroy_gem_object(msm_obj);
+ }
+
+ sde_power_data_bus_bandwidth_ctrl(phandle, pclient, false);
+
+ _sde_splash_destroy_splash_node(sinfo);
+}
+
+/*
+ * sde_splash_parse_dt.
+ * Parse two kinds of reserved memory nodes from the device tree:
+ * first the reserved memory used for the display buffers,
+ * then the memory node that LK's code and stack run in.
+ */
+int sde_splash_parse_dt(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+
+ if (!priv || !priv->kms) {
+ SDE_ERROR("Invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ sinfo = &sde_kms->splash_info;
+
+ if (_sde_splash_parse_dt_get_display_node(dev, sinfo)) {
+ SDE_ERROR("get display node failed\n");
+ return -EINVAL;
+ }
+
+ if (_sde_splash_parse_dt_get_lk_pool_node(dev, sinfo)) {
+ SDE_ERROR("get LK pool node failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int sde_splash_get_handoff_status(struct msm_kms *kms)
+{
+ uint32_t intf_sel = 0;
+ uint32_t split_display = 0;
+ uint32_t num_of_display_on = 0;
+ uint32_t i = 0;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ struct sde_rm *rm;
+ struct sde_hw_blk_reg_map *c;
+ struct sde_splash_info *sinfo;
+ struct sde_mdss_cfg *catalog;
+
+ sinfo = &sde_kms->splash_info;
+ if (!sinfo) {
+ SDE_ERROR("%s(%d): invalid splash info\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ rm = &sde_kms->rm;
+
+ if (!rm || !rm->hw_mdp) {
+ SDE_ERROR("invalid rm.\n");
+ return -EINVAL;
+ }
+
+ c = &rm->hw_mdp->hw;
+ if (c) {
+ intf_sel = SDE_REG_READ(c, DISP_INTF_SEL);
+ split_display = SDE_REG_READ(c, SPLIT_DISPLAY_EN);
+ }
+
+ catalog = sde_kms->catalog;
+
+ if (intf_sel != 0) {
+ for (i = 0; i < catalog->intf_count; i++)
+ if ((intf_sel >> i*8) & 0x000000FF)
+ num_of_display_on++;
+
+ /*
+ * When split display is enabled, the DSI0 and DSI1 interfaces
+ * drive a single display, so decrement 'num_of_display_on'
+ * by 1.
+ */
+ if (split_display)
+ num_of_display_on--;
+ }
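+
+ /*
+ * Illustrative example only: DISP_INTF_SEL packs one byte per
+ * interface, so intf_sel == 0x00020100 marks two interfaces as
+ * active and num_of_display_on becomes 2; if SPLIT_DISPLAY_EN is
+ * also set, both interfaces drive the same panel and the count
+ * drops back to 1.
+ */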
+
+ if (num_of_display_on) {
+ sinfo->handoff = true;
+ sinfo->program_scratch_regs = true;
+ } else {
+ sinfo->handoff = false;
+ sinfo->program_scratch_regs = false;
+ }
+
+ sinfo->lk_is_exited = false;
+
+ return 0;
+}
+
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo)
+{
+ struct msm_gem_object *msm_obj;
+ int i = 0, ret = 0;
+
+ if (!mmu || !sinfo)
+ return -EINVAL;
+
+ /* first, construct drm_gem_objects for the splash memory */
+ if (_sde_splash_gem_new(dev, sinfo))
+ return -ENOMEM;
+
+ /* second, construct the sg tables needed for the SMMU map call */
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ if (_sde_splash_get_pages(sinfo->obj[i],
+ sinfo->splash_mem_paddr[i]))
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
+
+ if (mmu->funcs && mmu->funcs->map) {
+ ret = mmu->funcs->map(mmu, sinfo->splash_mem_paddr[i],
+ msm_obj->sgt, IOMMU_READ | IOMMU_NOEXEC, NULL);
+
+ if (!ret) {
+ SDE_ERROR("Map blk %d @%pK failed.\n",
+ i, (void *)sinfo->splash_mem_paddr[i]);
+ return ret;
+ }
+ }
+ }
+
+ return ret ? 0 : -ENOMEM;
+}
+
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+ int connector_type)
+{
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ sinfo->hdmi_connector_cnt++;
+ break;
+ case DRM_MODE_CONNECTOR_DSI:
+ sinfo->dsi_connector_cnt++;
+ break;
+ default:
+ SDE_ERROR("invalid connector_type %d\n", connector_type);
+ }
+}
+
+int sde_splash_clean_up_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle, int connector_type)
+{
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ struct msm_mmu *mmu;
+ int ret = 0;
+ static bool hdmi_is_released;
+ static bool dsi_is_released;
+
+ if (!phandle || !kms) {
+ SDE_ERROR("invalid phandle/kms.\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ sinfo = &sde_kms->splash_info;
+ if (!sinfo) {
+ SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ /* Once both the HDMI and DSI resources have been freed:
+ * 1. Destroy the splash node objects.
+ * 2. Release the memory that LK's stack is running on.
+ * 3. Withdraw the AHB data bus bandwidth vote.
+ */
+ if (sinfo->hdmi_connector_cnt == 0 &&
+ sinfo->dsi_connector_cnt == 0) {
+ DRM_INFO("HDMI and DSI resource handoff is completed\n");
+
+ sinfo->lk_is_exited = false;
+
+ _sde_splash_destroy_splash_node(sinfo);
+
+ _sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr,
+ sinfo->lk_pool_size);
+
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, false);
+
+ return 0;
+ }
+
+ mmu = sde_kms->aspace[0]->mmu;
+
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (!hdmi_is_released)
+ sinfo->hdmi_connector_cnt--;
+
+ if ((sinfo->hdmi_connector_cnt == 0) && (!hdmi_is_released)) {
+ hdmi_is_released = true;
+
+ ret = _sde_splash_free_resource(mmu,
+ sinfo, SPLASH_HDMI);
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DSI:
+ if (!dsi_is_released)
+ sinfo->dsi_connector_cnt--;
+
+ if ((sinfo->dsi_connector_cnt == 0) && (!dsi_is_released)) {
+ dsi_is_released = true;
+
+ ret = _sde_splash_free_resource(mmu,
+ sinfo, SPLASH_DSI);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ SDE_ERROR("%s: invalid connector_type %d\n",
+ __func__, connector_type);
+ }
+
+ return ret;
+}
+
+/*
+ * This function will:
+ * 1. Notify LK to exit and wait until it has done so.
+ * 2. Clear DOMAIN_ATTR_EARLY_MAP so the IOMMU re-enables stage 1
+ *    translation now that LK no longer accesses display memory.
+ */
+int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
+{
+ struct sde_splash_info *sinfo;
+ struct msm_mmu *mmu;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ int ret;
+
+ sinfo = &sde_kms->splash_info;
+
+ if (!sinfo) {
+ SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ /* Monitor LK's status and tell it to exit. */
+ if (sinfo->program_scratch_regs) {
+ if (_sde_splash_lk_check(sde_kms->hw_intr))
+ _sde_splash_notify_lk_exit(sde_kms->hw_intr);
+
+ sinfo->handoff = false;
+ sinfo->program_scratch_regs = false;
+ }
+
+ if (!sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
+ /* Do not return an error here, so that the
+ * "lk_is_exited" flag below still gets set.
+ */
+ SDE_ERROR("invalid mmu\n");
+ WARN_ON(1);
+ } else {
+ mmu = sde_kms->aspace[0]->mmu;
+ /* After LK has exited, clear the early-map attribute
+ * (handoff is now false) so the IOMMU driver turns stage 1
+ * translation back on.
+ */
+ if (mmu->funcs && mmu->funcs->set_property) {
+ ret = mmu->funcs->set_property(mmu,
+ DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff);
+
+ if (ret)
+ SDE_ERROR("set_property failed\n");
+ }
+ }
+
+ sinfo->lk_is_exited = true;
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
new file mode 100644
index 000000000000..47965693f0b8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -0,0 +1,124 @@
+/**
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SDE_SPLASH_H_
+#define SDE_SPLASH_H_
+
+#include "msm_kms.h"
+#include "msm_mmu.h"
+
+enum splash_connector_type {
+ SPLASH_DSI = 0,
+ SPLASH_HDMI,
+};
+
+struct sde_splash_info {
+ /* handoff flag */
+ bool handoff;
+
+ /* flag of display scratch registers */
+ bool program_scratch_regs;
+
+ /* to indicate LK is totally exited */
+ bool lk_is_exited;
+
+ /* number of memory nodes used for display buffers */
+ uint32_t splash_mem_num;
+
+ /* physical address of memory node for display buffer */
+ phys_addr_t *splash_mem_paddr;
+
+ /* size of memory node */
+ size_t *splash_mem_size;
+
+ /* constructed gem objects for smmu mapping */
+ struct drm_gem_object **obj;
+
+ /* physical address of lk pool */
+ phys_addr_t lk_pool_paddr;
+
+ /* memory size of lk pool */
+ size_t lk_pool_size;
+
+ /* registered hdmi connector count */
+ uint32_t hdmi_connector_cnt;
+
+ /* registered dsi connector count */
+ uint32_t dsi_connector_cnt;
+};
+
+/* APIs for early splash handoff functions */
+
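+/*
+ * Expected call flow (a sketch of how sde_kms.c uses these entry points
+ * in this patch, not a normative ordering):
+ *
+ *   sde_splash_parse_dt()               - during sde_kms_hw_init()
+ *   sde_splash_get_handoff_status()     - read DISP_INTF_SEL
+ *   sde_splash_init()                   - only when handoff is true
+ *   sde_splash_clean_up_exit_lk()       - from prepare_commit
+ *   sde_splash_clean_up_free_resource() - per connector type
+ *                                         (caller not shown in this diff)
+ *   sde_splash_destroy()                - error/teardown path
+ */
+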
+/**
+ * sde_splash_get_handoff_status.
+ *
+ * Read the DISP_INTF_SEL register to determine whether
+ * early splash display was enabled by LK.
+ */
+int sde_splash_get_handoff_status(struct msm_kms *kms);
+
+/**
+ * sde_splash_init
+ *
+ * Vote for data bus bandwidth and check that the splash
+ * memory regions are reserved.
+ */
+int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms);
+
+/**
+ * sde_splash_setup_connector_count
+ *
+ * Track the number of registered DSI and HDMI connectors.
+ */
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+ int connector_type);
+
+/**
+ * sde_splash_clean_up_exit_lk.
+ *
+ * Tell LK to exit, and clean up the resource.
+ */
+int sde_splash_clean_up_exit_lk(struct msm_kms *kms);
+
+/**
+ * sde_splash_clean_up_free_resource.
+ *
+ * Free the HDMI or DSI splash resources according to the
+ * given connector_type.
+ */
+int sde_splash_clean_up_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle, int connector_type);
+
+/**
+ * sde_splash_parse_dt.
+ *
+ * Parse the reserved memory blocks for early splash from the DT.
+ */
+int sde_splash_parse_dt(struct drm_device *dev);
+
+/**
+ * sde_splash_smmu_map.
+ *
+ * Map the physical memory that LK was using through the IOMMU driver.
+ */
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo);
+
+/**
+ * sde_splash_destroy
+ *
+ * Destroy the splash resources on the failure path.
+ */
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient);
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index ca9229ede251..68246253bb70 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -229,10 +229,17 @@ u32 video_format)
{
u8 cea_mode = 0;
struct drm_display_mode *mode;
+ u32 mode_fmt_flags = 0;
/* Need to add Y420 support flag to the modes */
list_for_each_entry(mode, &connector->probed_modes, head) {
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /* Clear the RGB/YUV format flags before calling upstream API */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
cea_mode = drm_match_cea_mode(mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
if ((cea_mode != 0) && (cea_mode == video_format)) {
SDE_EDID_DEBUG("%s found match for %d ", __func__,
video_format);
@@ -246,7 +253,7 @@ struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
const u8 *db)
{
u32 offset = 0;
- u8 len = 0;
+ u8 cmdb_len = 0;
u8 svd_len = 0;
const u8 *svd = NULL;
u32 i = 0, j = 0;
@@ -262,10 +269,8 @@ const u8 *db)
return;
}
SDE_EDID_DEBUG("%s +\n", __func__);
- len = db[0] & 0x1f;
+ cmdb_len = db[0] & 0x1f;
- if (len < 7)
- return;
/* Byte 3 to L+1 contain SVDs */
offset += 2;
@@ -273,20 +278,24 @@ const u8 *db)
if (svd) {
/*moving to the next byte as vic info begins there*/
- ++svd;
svd_len = svd[0] & 0x1f;
+ ++svd;
}
for (i = 0; i < svd_len; i++, j++) {
- video_format = *svd & 0x7F;
- if (db[offset] & (1 << j))
+ video_format = *(svd + i) & 0x7F;
+ if (cmdb_len == 1) {
+ /* If cmdb_len is 1, it means all SVDs support YUV */
+ sde_edid_set_y420_support(connector, video_format);
+ } else if (db[offset] & (1 << j)) {
sde_edid_set_y420_support(connector, video_format);
- if (j & 0x80) {
- j = j/8;
- offset++;
- if (offset >= len)
- break;
+ if (j & 0x80) {
+ j = j/8;
+ offset++;
+ if (offset >= cmdb_len)
+ break;
+ }
}
}
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index 1143dc2c7bec..59e3dceca33c 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -33,6 +33,8 @@
#define SDE_CEA_EXT 0x02
#define SDE_EXTENDED_TAG 0x07
+#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
+
enum extended_data_block_types {
VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a9b01bcf7d0a..fcecaf5b5526 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3394,6 +3394,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a)
return;
+ /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS400 &&
+ rdev->pdev->subsystem_vendor == 0x1179 &&
+ rdev->pdev->subsystem_device == 0xff31)
+ return;
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4aa2cbe4c85f..a77521695c9a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -127,6 +127,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2851ed..1f013d45c9e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
+ drm_ht_remove(&man->resources);
kfree(man);
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 990c9bca5127..afb489f10172 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2210,21 +2210,23 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
if (fd != 0)
dmabuf = dma_buf_get(fd - 1);
}
- up_read(&current->mm->mmap_sem);
- if (IS_ERR_OR_NULL(dmabuf))
+ if (IS_ERR_OR_NULL(dmabuf)) {
+ up_read(&current->mm->mmap_sem);
return dmabuf ? PTR_ERR(dmabuf) : -ENODEV;
+ }
ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
if (ret) {
dma_buf_put(dmabuf);
+ up_read(&current->mm->mmap_sem);
return ret;
}
/* Setup the user addr/cache mode for cache operations */
entry->memdesc.useraddr = hostptr;
_setup_cache_mode(entry, vma);
-
+ up_read(&current->mm->mmap_sem);
return 0;
}
#else
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index f8f0e7ccb0d3..8dc2ebd26cf9 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -348,7 +348,13 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
struct kgsl_cmd_syncpoint_fence *sync = priv;
struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
struct kgsl_drawobj_sync_event *event;
+ struct sync_fence *fence = NULL;
unsigned int id;
+ int ret = 0;
+
+ fence = sync_fence_fdget(sync->fd);
+ if (fence == NULL)
+ return -EINVAL;
kref_get(&drawobj->refcount);
@@ -364,11 +370,13 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
set_bit(event->id, &syncobj->pending);
+ trace_syncpoint_fence(syncobj, fence->name);
+
event->handle = kgsl_sync_fence_async_wait(sync->fd,
drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
- int ret = PTR_ERR(event->handle);
+ ret = PTR_ERR(event->handle);
clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
@@ -376,18 +384,16 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
drawobj_put(drawobj);
/*
- * If ret == 0 the fence was already signaled - print a trace
- * message so we can track that
+ * Print a syncpoint_fence_expire trace if
+ * the fence is already signaled or there is
+ * a failure in registering the fence waiter.
*/
- if (ret == 0)
- trace_syncpoint_fence_expire(syncobj, "signaled");
-
- return ret;
+ trace_syncpoint_fence_expire(syncobj, (ret < 0) ?
+ "error" : fence->name);
}
- trace_syncpoint_fence(syncobj, event->handle->name);
-
- return 0;
+ sync_fence_put(fence);
+ return ret;
}
/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 63315e86d172..ad8b0131bb46 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1022,6 +1022,8 @@ static void __force_on(struct kgsl_device *device, int flag, int on)
if (on) {
switch (flag) {
case KGSL_PWRFLAGS_CLK_ON:
+ /* make sure pwrrail is ON before enabling clocks */
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
KGSL_STATE_ACTIVE);
break;
@@ -1817,7 +1819,12 @@ static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int status = 0;
- if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
+ /*
+ * Disabling the regulator means also disabling dependent clocks.
+ * Hence don't disable it if force clock ON is set.
+ */
+ if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) ||
+ test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
return 0;
if (state == KGSL_PWRFLAGS_OFF) {
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 7f93ab8fa8d4..2b7a1fcbaa78 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -522,7 +522,8 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
struct kgsl_device *device = dev_get_drvdata(dev);
struct kgsl_pwrctrl *pwr;
struct kgsl_pwrlevel *pwr_level;
- int level, i;
+ int level;
+ unsigned int i;
unsigned long cur_freq;
if (device == NULL)
@@ -550,7 +551,12 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
/* If the governor recommends a new frequency, update it here */
if (*freq != cur_freq) {
level = pwr->max_pwrlevel;
- for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
+ /*
+ * Array index of pwrlevels[] should be within the permitted
+ * power levels, i.e., from max_pwrlevel to min_pwrlevel.
+ */
+ for (i = pwr->min_pwrlevel; (i >= pwr->max_pwrlevel
+ && i <= pwr->min_pwrlevel); i--)
if (*freq <= pwr->pwrlevels[i].gpu_freq) {
if (pwr->thermal_cycle == CYCLE_ACTIVE)
level = _thermal_adjust(pwr, i);
@@ -585,7 +591,7 @@ int kgsl_devfreq_get_dev_status(struct device *dev,
struct kgsl_device *device = dev_get_drvdata(dev);
struct kgsl_pwrctrl *pwrctrl;
struct kgsl_pwrscale *pwrscale;
- ktime_t tmp;
+ ktime_t tmp1, tmp2;
if (device == NULL)
return -ENODEV;
@@ -596,6 +602,8 @@ int kgsl_devfreq_get_dev_status(struct device *dev,
pwrctrl = &device->pwrctrl;
mutex_lock(&device->mutex);
+
+ tmp1 = ktime_get();
/*
* If the GPU clock is on grab the latest power counter
* values. Otherwise the most recent ACTIVE values will
@@ -603,9 +611,9 @@ int kgsl_devfreq_get_dev_status(struct device *dev,
*/
kgsl_pwrscale_update_stats(device);
- tmp = ktime_get();
- stat->total_time = ktime_us_delta(tmp, pwrscale->time);
- pwrscale->time = tmp;
+ tmp2 = ktime_get();
+ stat->total_time = ktime_us_delta(tmp2, pwrscale->time);
+ pwrscale->time = tmp1;
stat->busy_time = pwrscale->accum_stats.busy_time;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e37030624165..c7f8b70d15ee 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -285,6 +285,9 @@
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a
+#define USB_VENDOR_ID_DELL 0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
+
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 0b80633bae91..d4d655a10df1 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -364,6 +364,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret)
return ret;
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+ * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
+ * rely on this.
+ */
+ usleep_range(1000, 5000);
+
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 6ca6ab00fa93..ce1543d69acb 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,6 +72,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hwtracing/coresight/coresight-remote-etm.c b/drivers/hwtracing/coresight/coresight-remote-etm.c
index 30b13282f6c0..cc0b25b130d7 100644
--- a/drivers/hwtracing/coresight/coresight-remote-etm.c
+++ b/drivers/hwtracing/coresight/coresight-remote-etm.c
@@ -186,12 +186,9 @@ static void remote_etm_rcv_msg(struct work_struct *work)
struct remote_etm_drvdata *drvdata = container_of(work,
struct remote_etm_drvdata,
work_rcv_msg);
- mutex_lock(&drvdata->mutex);
if (qmi_recv_msg(drvdata->handle) < 0)
dev_err(drvdata->dev, "%s: Error receiving QMI message\n",
__func__);
-
- mutex_unlock(&drvdata->mutex);
}
static void remote_etm_notify(struct qmi_handle *handle,
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 316d8b783d94..691c7bb3afac 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -764,6 +764,16 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
pm_runtime_get_sync(drvdata->dev);
mutex_lock(&drvdata->mem_lock);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
+ pm_runtime_put(drvdata->dev);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
/*
@@ -807,19 +817,8 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
}
- mutex_unlock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR
- && drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
- usb_qdss_close(drvdata->usbch);
- pm_runtime_put(drvdata->dev);
-
- return -EBUSY;
- }
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
tmc_etb_enable_hw(drvdata);
@@ -845,6 +844,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
*/
drvdata->sticky_enable = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC enabled\n");
return 0;
@@ -1140,6 +1140,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
unsigned long flags;
enum tmc_mode mode;
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->sticky_enable) {
dev_err(drvdata->dev, "enable tmc once before reading\n");
@@ -1172,11 +1173,13 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
out:
drvdata->reading = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC read start\n");
return 0;
err:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
return ret;
}
@@ -1353,7 +1356,11 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
{
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ char *bufp;
+
+ mutex_lock(&drvdata->mem_lock);
+
+ bufp = drvdata->buf + *ppos;
if (*ppos + len > drvdata->size)
len = drvdata->size - *ppos;
@@ -1375,6 +1382,7 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
if (copy_to_user(data, bufp, len)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ mutex_unlock(&drvdata->mem_lock);
return -EFAULT;
}
@@ -1382,6 +1390,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
__func__, len, (int)(drvdata->size - *ppos));
+
+ mutex_unlock(&drvdata->mem_lock);
return len;
}
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 1f042fa56eea..f4ed71f9c1a7 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -2232,19 +2232,8 @@ static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
{
int ret;
- struct i2c_msm_xfer *xfer = &ctrl->xfer;
mutex_lock(&ctrl->xfer.mtx);
- /* if system is suspended just bail out */
- if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
- struct i2c_msg *msgs = xfer->msgs + xfer->cur_buf.msg_idx;
- dev_err(ctrl->dev,
- "slave:0x%x is calling xfer when system is suspended\n",
- msgs->addr);
- mutex_unlock(&ctrl->xfer.mtx);
- return -EIO;
- }
-
i2c_msm_pm_pinctrl_state(ctrl, true);
pm_runtime_get_sync(ctrl->dev);
/*
@@ -2330,6 +2319,14 @@ i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
return PTR_ERR(msgs);
}
+ /* if system is suspended just bail out */
+ if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
+ dev_err(ctrl->dev,
+ "slave:0x%x is calling xfer when system is suspended\n",
+ msgs->addr);
+ return -EIO;
+ }
+
ret = i2c_msm_pm_xfer_start(ctrl);
if (ret)
return ret;
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 537cca877f66..5c20970ccbbb 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -331,8 +331,8 @@ static int rradc_post_process_therm(struct rradc_chip *chip,
int64_t temp;
/* K = code/4 */
- temp = div64_s64(adc_code, FG_ADC_RR_BATT_THERM_LSB_K);
- temp *= FG_ADC_SCALE_MILLI_FACTOR;
+ temp = ((int64_t)adc_code * FG_ADC_SCALE_MILLI_FACTOR);
+ temp = div64_s64(temp, FG_ADC_RR_BATT_THERM_LSB_K);
*result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL;
return 0;
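The qcom-rradc change reorders the fixed-point math so the millidegree scaling is applied before the divide; dividing first truncates the fractional part of code/4 and can discard up to three quarters of a Kelvin. A standalone C illustration with placeholder constants and a made-up ADC code (the values are hypothetical, not the driver's calibration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int64_t lsb_per_kelvin = 4;   /* placeholder for FG_ADC_RR_BATT_THERM_LSB_K */
	const int64_t milli = 1000;         /* placeholder for FG_ADC_SCALE_MILLI_FACTOR */
	int64_t code = 1221;                /* hypothetical ADC code */

	int64_t old_way = (code / lsb_per_kelvin) * milli;  /* 305 * 1000 = 305000 */
	int64_t new_way = (code * milli) / lsb_per_kelvin;  /* 1221000 / 4 = 305250 */

	printf("divide-first=%lld, scale-first=%lld\n",
	       (long long)old_way, (long long)new_way);
	return 0;
}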
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index e7b96f1ac2c5..5be14ad29d46 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -788,6 +788,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
},
+ {
+ /* Fujitsu UH554 laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ },
+ },
{ }
};
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 4831eb910fc7..22160e481794 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -699,9 +699,9 @@ out_clear_state:
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, mm);
+ mmput(mm);
out_free:
- mmput(mm);
free_pasid_state(pasid_state);
out:
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b92b8a724efb..f9711aceef54 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1137,7 +1137,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
- level_pfn = pfn & level_mask(level - 1);
+ level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33176a4aa6ef..92e6ae48caf8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -394,36 +394,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
- if (ret) {
- kfree(device);
- return ret;
- }
+ if (ret)
+ goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
if (!device->name) {
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name);
if (ret) {
- kfree(device->name);
if (ret == -EEXIST && i >= 0) {
/*
* Account for the slim chance of collision
* and append an instance to the name.
*/
+ kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++);
goto rename;
}
-
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return ret;
+ goto err_free_name;
}
kobject_get(group->devices_kobj);
@@ -435,8 +429,10 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
- __iommu_attach_device(group->domain, dev);
+ ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
+ if (ret)
+ goto err_put_group;
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
@@ -447,6 +443,21 @@ rename:
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0;
+
+err_put_group:
+ mutex_lock(&group->mutex);
+ list_del(&device->list);
+ mutex_unlock(&group->mutex);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+err_free_name:
+ kfree(device->name);
+err_remove_link:
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+ kfree(device);
+ pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
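The iommu_group_add_device() rework replaces several copy-pasted "undo everything and return" blocks with a single goto ladder, so each acquired resource is released exactly once, in reverse order, and the new attach-failure path can reuse the same unwind. A generic, hedged illustration of the idiom in plain C (resource names are invented):

#include <stdio.h>
#include <stdlib.h>

/* Acquire in order; on failure, fall through the labels in reverse order. */
static int setup(void)
{
	char *name = NULL, *link = NULL, *dev = NULL;

	dev = malloc(16);
	if (!dev)
		goto err;
	link = malloc(16);
	if (!link)
		goto err_free_dev;
	name = malloc(16);
	if (!name)
		goto err_free_link;

	printf("all resources acquired\n");
	free(name);
	free(link);
	free(dev);
	return 0;

err_free_link:
	free(link);
err_free_dev:
	free(dev);
err:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}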
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index 4f6086970131..43d566fd38ae 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -645,6 +645,8 @@ static int add_as_linear_device(struct dm_target *ti, char *dev)
android_verity_target.iterate_devices = dm_linear_iterate_devices,
android_verity_target.io_hints = NULL;
+ set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0);
+
err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args);
if (!err) {
diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif.c b/drivers/media/platform/msm/ais/ispif/msm_ispif.c
index 8eb88364a2cb..c41f4546da5f 100644
--- a/drivers/media/platform/msm/ais/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/ais/ispif/msm_ispif.c
@@ -69,6 +69,11 @@ static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
{
if (!ispif->enb_dump_reg)
return;
+ if (!ispif->base) {
+ pr_err("%s: null pointer for the ispif base\n", __func__);
+ return;
+ }
+
msm_camera_io_dump(ispif->base, 0x250, 0);
}
diff --git a/drivers/media/platform/msm/ais/msm.c b/drivers/media/platform/msm/ais/msm.c
index 3c2e28eaa4f9..2a1ec86118c5 100644
--- a/drivers/media/platform/msm/ais/msm.c
+++ b/drivers/media/platform/msm/ais/msm.c
@@ -725,6 +725,9 @@ static long msm_private_ioctl(struct file *file, void *fh,
return 0;
}
+ if (!event_data)
+ return -EINVAL;
+
memset(&event, 0, sizeof(struct v4l2_event));
session_id = event_data->session_id;
stream_id = event_data->stream_id;
diff --git a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c
index 811ac98beead..6c50070c91ab 100644
--- a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c
@@ -2882,7 +2882,7 @@ end:
return rc;
}
-static int msm_cpp_validate_input(unsigned int cmd, void *arg,
+static int msm_cpp_validate_ioctl_input(unsigned int cmd, void *arg,
struct msm_camera_v4l2_ioctl_t **ioctl_ptr)
{
switch (cmd) {
@@ -2922,6 +2922,14 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("sd %pK\n", sd);
return -EINVAL;
}
+
+
+ rc = msm_cpp_validate_ioctl_input(cmd, arg, &ioctl_ptr);
+ if (rc != 0) {
+ pr_err("input validation failed\n");
+ return rc;
+ }
+
cpp_dev = v4l2_get_subdevdata(sd);
if (cpp_dev == NULL) {
pr_err("cpp_dev is null\n");
@@ -2933,11 +2941,6 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
return -EINVAL;
}
- rc = msm_cpp_validate_input(cmd, arg, &ioctl_ptr);
- if (rc != 0) {
- pr_err("input validation failed\n");
- return rc;
- }
mutex_lock(&cpp_dev->mutex);
CPP_DBG("E cmd: 0x%x\n", cmd);
@@ -3437,6 +3440,7 @@ STREAM_BUFF_END:
} else {
pr_err("%s:%d IOMMMU attach triggered in invalid state\n",
__func__, __LINE__);
+ rc = -EINVAL;
}
break;
}
@@ -4061,7 +4065,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
default:
pr_err_ratelimited("%s: unsupported compat type :%x LOAD %lu\n",
__func__, cmd, VIDIOC_MSM_CPP_LOAD_FIRMWARE);
- break;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
}
mutex_unlock(&cpp_dev->mutex);
@@ -4092,7 +4097,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
default:
pr_err_ratelimited("%s: unsupported compat type :%d\n",
__func__, cmd);
- break;
+ return -EINVAL;
}
if (is_copytouser_req) {
diff --git a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
index 1adb380f335f..40806d5a164f 100644
--- a/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/ais/sensor/actuator/msm_actuator.c
@@ -1722,6 +1722,10 @@ static long msm_actuator_subdev_do_ioctl(
parg = &actuator_data;
break;
}
+ break;
+ case VIDIOC_MSM_ACTUATOR_CFG:
+ pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd);
+ return -EINVAL;
}
rc = msm_actuator_subdev_ioctl(sd, cmd, parg);
diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
index 6af589e5c230..024677e1b755 100644
--- a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
@@ -152,6 +152,13 @@ static int32_t msm_flash_i2c_write_table(
conf_array.reg_setting = settings->reg_setting_a;
conf_array.size = settings->size;
+ /* Validate the settings size */
+ if ((!conf_array.size) || (conf_array.size > MAX_I2C_REG_SET)) {
+ pr_err("failed: invalid size %d", conf_array.size);
+ return -EINVAL;
+ }
+
+
return flash_ctrl->flash_i2c_client.i2c_func_tbl->i2c_write_table(
&flash_ctrl->flash_i2c_client, &conf_array);
}
@@ -497,23 +504,46 @@ static int32_t msm_flash_init(
}
flash_ctrl->flash_state = MSM_CAMERA_FLASH_INIT;
-
CDBG("Exit");
return 0;
}
-#ifdef CONFIG_COMPAT
static int32_t msm_flash_init_prepare(
struct msm_flash_ctrl_t *flash_ctrl,
struct msm_flash_cfg_data_t *flash_data)
{
+ #ifdef CONFIG_COMPAT
+ struct msm_flash_cfg_data_t flash_data_k;
+ struct msm_flash_init_info_t flash_init_info;
+ int32_t i = 0;
+
+ if (!is_compat_task()) {
+ /*for 64-bit usecase,it need copy the data to local memory*/
+ flash_data_k.cfg_type = flash_data->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data_k.flash_current[i] =
+ flash_data->flash_current[i];
+ flash_data_k.flash_duration[i] =
+ flash_data->flash_duration[i];
+ }
+
+ flash_data_k.cfg.flash_init_info = &flash_init_info;
+ if (copy_from_user(&flash_init_info,
+ (void __user *)(flash_data->cfg.flash_init_info),
+ sizeof(struct msm_flash_init_info_t))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ return msm_flash_init(flash_ctrl, &flash_data_k);
+ }
+ /*
+ * for 32-bit usecase,it already copy the userspace
+ * data to local memory in msm_flash_subdev_do_ioctl()
+ * so here do not need copy from user
+ */
return msm_flash_init(flash_ctrl, flash_data);
-}
#else
-static int32_t msm_flash_init_prepare(
- struct msm_flash_ctrl_t *flash_ctrl,
- struct msm_flash_cfg_data_t *flash_data)
-{
struct msm_flash_cfg_data_t flash_data_k;
struct msm_flash_init_info_t flash_init_info;
int32_t i = 0;
@@ -528,15 +558,15 @@ static int32_t msm_flash_init_prepare(
flash_data_k.cfg.flash_init_info = &flash_init_info;
if (copy_from_user(&flash_init_info,
- (void *)(flash_data->cfg.flash_init_info),
+ (void __user *)(flash_data->cfg.flash_init_info),
sizeof(struct msm_flash_init_info_t))) {
pr_err("%s copy_from_user failed %d\n",
__func__, __LINE__);
return -EFAULT;
}
return msm_flash_init(flash_ctrl, &flash_data_k);
-}
#endif
+}
static int32_t msm_flash_low(
struct msm_flash_ctrl_t *flash_ctrl,
@@ -1022,13 +1052,13 @@ static long msm_flash_subdev_do_ioctl(
sd = vdev_to_v4l2_subdev(vdev);
u32 = (struct msm_flash_cfg_data_t32 *)arg;
- flash_data.cfg_type = u32->cfg_type;
- for (i = 0; i < MAX_LED_TRIGGERS; i++) {
- flash_data.flash_current[i] = u32->flash_current[i];
- flash_data.flash_duration[i] = u32->flash_duration[i];
- }
switch (cmd) {
case VIDIOC_MSM_FLASH_CFG32:
+ flash_data.cfg_type = u32->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data.flash_current[i] = u32->flash_current[i];
+ flash_data.flash_duration[i] = u32->flash_duration[i];
+ }
cmd = VIDIOC_MSM_FLASH_CFG;
switch (flash_data.cfg_type) {
case CFG_FLASH_OFF:
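The msm_flash.c changes fold the compat and non-compat builds of msm_flash_init_prepare() into one body: 32-bit callers already have their config copied in the compat ioctl shim, while 64-bit callers (is_compat_task() false) now get flash_init_info explicitly copied from user space through the __user-annotated pointer. A hedged kernel-style fragment of that copy step, with an invented struct and helper name:

/* Sketch: pull a user-space descriptor into kernel memory before using it. */
static int fetch_init_info(struct my_init_info *dst,
			   const void __user *src)
{
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;	/* partial copy: treat as a bad user pointer */
	return 0;
}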
diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
index 28a5402a4359..aa7658f359ac 100644
--- a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
+++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
@@ -781,11 +781,11 @@ static long msm_ois_subdev_do_ioctl(
u32 = (struct msm_ois_cfg_data32 *)arg;
parg = arg;
- ois_data.cfgtype = u32->cfgtype;
switch (cmd) {
case VIDIOC_MSM_OIS_CFG32:
cmd = VIDIOC_MSM_OIS_CFG;
+ ois_data.cfgtype = u32->cfgtype;
switch (u32->cfgtype) {
case CFG_OIS_CONTROL:
@@ -819,7 +819,6 @@ static long msm_ois_subdev_do_ioctl(
settings.reg_setting =
compat_ptr(settings32.reg_setting);
- ois_data.cfgtype = u32->cfgtype;
ois_data.cfg.settings = &settings;
parg = &ois_data;
break;
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
index c243d587e308..90edadaed1ef 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
@@ -175,35 +175,45 @@ int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data,
void msm_camera_io_dump(void __iomem *addr, int size, int enable)
{
- char line_str[128], *p_str;
+ char line_str[128];
int i;
- u32 *p = (u32 *) addr;
+ ptrdiff_t p = 0;
+ size_t offset = 0, used = 0;
u32 data;
CDBG("%s: addr=%pK size=%d\n", __func__, addr, size);
- if (!p || (size <= 0) || !enable)
+ if (!addr || (size <= 0) || !enable)
return;
line_str[0] = '\0';
- p_str = line_str;
for (i = 0; i < size/4; i++) {
if (i % 4 == 0) {
-#ifdef CONFIG_COMPAT
- snprintf(p_str, 20, "%016lx: ", (unsigned long) p);
- p_str += 18;
-#else
- snprintf(p_str, 12, "%08lx: ", (unsigned long) p);
- p_str += 10;
-#endif
+ used = snprintf(line_str + offset,
+ sizeof(line_str) - offset, "0x%04tX: ", p);
+ if (offset + used >= sizeof(line_str)) {
+ pr_err("%s\n", line_str);
+ offset = 0;
+ line_str[0] = '\0';
+ } else {
+ offset += used;
+ }
+ }
+ data = readl_relaxed(addr + p);
+ p = p + 4;
+ used = snprintf(line_str + offset,
+ sizeof(line_str) - offset, "%08x ", data);
+ if (offset + used >= sizeof(line_str)) {
+ pr_err("%s\n", line_str);
+ offset = 0;
+ line_str[0] = '\0';
+ } else {
+ offset += used;
}
- data = readl_relaxed(p++);
- snprintf(p_str, 12, "%08x ", data);
- p_str += 9;
if ((i + 1) % 4 == 0) {
pr_err("%s\n", line_str);
line_str[0] = '\0';
- p_str = line_str;
+ offset = 0;
}
}
if (line_str[0] != '\0')
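msm_camera_io_dump() now formats each dump line with snprintf() against the space remaining in line_str and flushes early when an append would not fit, instead of advancing a bare char pointer by hard-coded widths. A standalone user-space sketch of that bounded line builder, reading from an ordinary array instead of device registers (the helper and variable names are invented):

#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Append at offset; if the text will not fit, print what we have and drop it. */
static size_t append(char *buf, size_t cap, size_t off, const char *fmt, ...)
{
	va_list ap;
	int used;

	va_start(ap, fmt);
	used = vsnprintf(buf + off, cap - off, fmt, ap);
	va_end(ap);
	if (used < 0 || off + (size_t)used >= cap) {
		printf("%s\n", buf);
		buf[0] = '\0';
		return 0;
	}
	return off + (size_t)used;
}

int main(void)
{
	uint32_t regs[8] = { 1, 2, 3, 4, 0xdead, 0xbeef, 0xcafe, 0xf00d };
	char line[128] = "";
	size_t off = 0;
	size_t i;

	for (i = 0; i < 8; i++) {
		if (i % 4 == 0)			/* start of a line: print the offset */
			off = append(line, sizeof(line), off, "0x%04zX: ", i * 4);
		off = append(line, sizeof(line), off, "%08x ", regs[i]);
		if ((i + 1) % 4 == 0) {		/* four words per line, then flush */
			printf("%s\n", line);
			off = 0;
			line[0] = '\0';
		}
	}
	if (line[0] != '\0')
		printf("%s\n", line);
	return 0;
}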
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index a04d7ca73fe1..63c3595a3f85 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -1057,14 +1057,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
a->value = ctx->format.size->work_size;
break;
case V4L2_CID_FD_WORK_MEMORY_FD:
+ mutex_lock(&ctx->fd_device->recovery_lock);
if (ctx->work_buf.fd != -1)
msm_fd_hw_unmap_buffer(&ctx->work_buf);
if (a->value >= 0) {
ret = msm_fd_hw_map_buffer(&ctx->mem_pool,
a->value, &ctx->work_buf);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&ctx->fd_device->recovery_lock);
return ret;
+ }
}
+ mutex_unlock(&ctx->fd_device->recovery_lock);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 1f1435fe282e..b2d152bf4ef0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -786,6 +786,7 @@ struct vfe_device {
size_t num_norm_clk;
bool hvx_clk_state;
enum cam_ahb_clk_vote ahb_vote;
+ enum cam_ahb_clk_vote user_requested_ahb_vote;
struct cx_ipeak_client *vfe_cx_ipeak;
/* Sync variables*/
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 6515e3d6ecbc..24d1c6cba84d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -274,10 +274,12 @@ int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev,
enum cam_ahb_clk_vote src_clk_vote;
struct msm_isp_clk_rates clk_rates;
- if (ahb_cfg)
+ if (ahb_cfg) {
vote = msm_isp47_get_cam_clk_vote(ahb_cfg->vote);
- else
- vote = CAM_AHB_SVS_VOTE;
+ vfe_dev->user_requested_ahb_vote = vote;
+ } else {
+ vote = vfe_dev->user_requested_ahb_vote;
+ }
vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(vfe_dev,
&clk_rates);
@@ -327,6 +329,7 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
if (rc)
goto clk_enable_failed;
+ vfe_dev->user_requested_ahb_vote = CAM_AHB_SVS_VOTE;
rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
if (rc < 0) {
pr_err("%s: failed to vote for AHB\n", __func__);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 0e0f10e7fbb5..63f5497e63b8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -776,40 +776,6 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
}
}
-static int msm_isp_check_sync_time(struct msm_vfe_src_info *src_info,
- struct msm_isp_timestamp *ts,
- struct master_slave_resource_info *ms_res)
-{
- int i;
- struct msm_vfe_src_info *master_src_info = NULL;
- uint32_t master_time = 0, current_time;
-
- if (!ms_res->src_sof_mask)
- return 0;
-
- for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
- if (ms_res->src_info[i] == NULL)
- continue;
- if (src_info == ms_res->src_info[i] ||
- ms_res->src_info[i]->active == 0)
- continue;
- if (ms_res->src_sof_mask &
- (1 << ms_res->src_info[i]->dual_hw_ms_info.index)) {
- master_src_info = ms_res->src_info[i];
- break;
- }
- }
- if (!master_src_info)
- return 0;
- master_time = master_src_info->
- dual_hw_ms_info.sof_info.mono_timestamp_ms;
- current_time = ts->buf_time.tv_sec * 1000 +
- ts->buf_time.tv_usec / 1000;
- if ((current_time - master_time) > ms_res->sof_delta_threshold)
- return 1;
- return 0;
-}
-
static void msm_isp_sync_dual_cam_frame_id(
struct vfe_device *vfe_dev,
struct master_slave_resource_info *ms_res,
@@ -824,24 +790,11 @@ static void msm_isp_sync_dual_cam_frame_id(
if (src_info->dual_hw_ms_info.sync_state ==
ms_res->dual_sync_mode) {
- if (msm_isp_check_sync_time(src_info, ts, ms_res) == 0) {
- (frame_src == VFE_PIX_0) ? src_info->frame_id +=
+ (frame_src == VFE_PIX_0) ? src_info->frame_id +=
vfe_dev->axi_data.src_info[frame_src].
sof_counter_step :
src_info->frame_id++;
- return;
- }
- ms_res->src_sof_mask = 0;
- ms_res->active_src_mask = 0;
- for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
- if (ms_res->src_info[i] == NULL)
- continue;
- if (ms_res->src_info[i]->active == 0)
- continue;
- ms_res->src_info[i]->dual_hw_ms_info.
- sync_state =
- MSM_ISP_DUAL_CAM_ASYNC;
- }
+ return;
}
/* find highest frame id */
@@ -2616,6 +2569,7 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint32_t bufq_handle = 0, bufq_id = 0;
struct msm_isp_timestamp timestamp;
+ struct msm_vfe_frame_request_queue *queue_req;
unsigned long flags;
int vfe_idx;
@@ -2652,8 +2606,18 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
VFE_PING_FLAG);
msm_isp_cfg_stream_scratch(stream_info,
VFE_PONG_FLAG);
+ stream_info->undelivered_request_cnt = 0;
spin_unlock_irqrestore(&stream_info->lock,
flags);
+ while (!list_empty(&stream_info->request_q)) {
+ queue_req = list_first_entry_or_null(
+ &stream_info->request_q,
+ struct msm_vfe_frame_request_queue, list);
+ if (queue_req) {
+ queue_req->cmd_used = 0;
+ list_del(&queue_req->list);
+ }
+ }
for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX;
bufq_id++) {
bufq_handle = stream_info->bufq_handle[bufq_id];
@@ -3446,7 +3410,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
}
if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev->
- axi_data.src_info[VFE_PIX_0].sof_counter_step)) ||
+ axi_data.src_info[frame_src].sof_counter_step)) ||
((!vfe_dev->axi_data.src_info[frame_src].active))) {
pr_debug("%s:%d invalid frame id %d cur frame id %d pix %d\n",
__func__, __LINE__, frame_id,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index a8d4cfb43927..0f029c0d5178 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -136,7 +136,7 @@ static inline void msm_isp_cfg_stream_scratch(
}
static inline struct msm_vfe_axi_stream *msm_isp_get_stream_common_data(
- struct vfe_device *vfe_dev, int stream_idx)
+ struct vfe_device *vfe_dev, uint32_t stream_idx)
{
struct msm_vfe_common_dev_data *common_data = vfe_dev->common_data;
struct msm_vfe_axi_stream *stream_info;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index d30d8022b7ab..e87f2414a879 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -511,6 +511,9 @@ static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
return -EINVAL;
}
+ vfe_dev->axi_data.
+ src_info[input_cfg->input_src].sof_counter_step = 1;
+
vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
input_cfg->input_pix_clk;
vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 3f900ded090a..9c3bd7b41ce9 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -1025,6 +1025,9 @@ static void msm_ispif_config_stereo(struct ispif_device *ispif,
enum msm_ispif_vfe_intf vfe_intf;
uint32_t stereo_3d_threshold = STEREO_DEFAULT_3D_THRESHOLD;
+ if (params->num > MAX_PARAM_ENTRIES)
+ return;
+
for (i = 0; i < params->num; i++) {
vfe_intf = params->entries[i].vfe_intf;
if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
index e40869d41a5d..821833d53905 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -903,26 +903,45 @@ int msm_jpeg_hw_exec_cmds(struct msm_jpeg_hw_cmd *hw_cmd_p, uint32_t m_cmds,
void msm_jpeg_io_dump(void *base, int size)
{
- char line_str[128], *p_str;
+ char line_str[128];
void __iomem *addr = (void __iomem *)base;
int i;
u32 *p = (u32 *) addr;
+ size_t offset = 0;
+ size_t used = 0;
+ size_t min_range = 0;
+ size_t sizeof_line_str = sizeof(line_str);
u32 data;
JPEG_DBG_HIGH("%s:%d] %pK %d", __func__, __LINE__, addr, size);
line_str[0] = '\0';
- p_str = line_str;
for (i = 0; i < size/4; i++) {
if (i % 4 == 0) {
- snprintf(p_str, 12, "%08lx: ", (unsigned long)p);
- p_str += 10;
+ used = snprintf(line_str + offset,
+ sizeof_line_str - offset, "%pK ", p);
+ if ((used < min_range) ||
+ (offset + used >= sizeof_line_str)) {
+ JPEG_PR_ERR("%s\n", line_str);
+ offset = 0;
+ line_str[0] = '\0';
+ } else {
+ offset += used;
+ }
}
data = msm_camera_io_r(p++);
- snprintf(p_str, 12, "%08x ", data);
- p_str += 9;
+ used = snprintf(line_str + offset,
+ sizeof_line_str - offset, "%08x ", data);
+ if ((used < min_range) ||
+ (offset + used >= sizeof_line_str)) {
+ JPEG_PR_ERR("%s\n", line_str);
+ offset = 0;
+ line_str[0] = '\0';
+ } else {
+ offset += used;
+ }
if ((i + 1) % 4 == 0) {
JPEG_DBG_HIGH("%s\n", line_str);
line_str[0] = '\0';
- p_str = line_str;
+ offset = 0;
}
}
if (line_str[0] != '\0')
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 9cb7d5299ef8..4e5dc66d94a9 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -287,6 +287,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
return;
while (1) {
+ unsigned long wl_flags;
if (try_count > 5) {
pr_err("%s : not able to delete stream %d\n",
@@ -294,18 +295,20 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
break;
}
- write_lock(&session->stream_rwlock);
+ write_lock_irqsave(&session->stream_rwlock, wl_flags);
try_count++;
stream = msm_queue_find(&session->stream_q, struct msm_stream,
list, __msm_queue_find_stream, &stream_id);
if (!stream) {
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock,
+ wl_flags);
return;
}
if (msm_vb2_get_stream_state(stream) != 1) {
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock,
+ wl_flags);
continue;
}
@@ -315,7 +318,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
kfree(stream);
stream = NULL;
spin_unlock_irqrestore(&(session->stream_q.lock), flags);
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock, wl_flags);
break;
}
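msm_delete_stream() here, and the msm_vb2.c readers below, switch from the plain rwlock calls to the _irqsave/_irqrestore variants, which save and disable local interrupts while the lock is held; that is the safe choice if the same rwlock can ever be contended from a context running with interrupts off. A tiny hedged kernel-style sketch with an invented lock user:

/* Sketch only: take the writer side of an rwlock with interrupts saved. */
static void bump_counter(rwlock_t *lock, int *counter)
{
	unsigned long flags;

	write_lock_irqsave(lock, flags);	/* saves IRQ state, disables IRQs, locks */
	(*counter)++;
	write_unlock_irqrestore(lock, flags);	/* unlocks, restores saved IRQ state */
}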
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index ba9b4df6bf22..e271c7fcd1b6 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -47,22 +47,23 @@ int msm_vb2_buf_init(struct vb2_buffer *vb)
struct msm_session *session;
struct msm_vb2_buffer *msm_vb2_buf;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ unsigned long rl_flags;
session = msm_get_session_from_vb2q(vb->vb2_queue);
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s: Couldn't find stream\n", __func__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
msm_vb2_buf = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
msm_vb2_buf->in_freeq = 0;
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return 0;
}
@@ -71,7 +72,7 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb)
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
@@ -84,19 +85,19 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb)
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
spin_lock_irqsave(&stream->stream_lock, flags);
list_add_tail(&msm_vb2->list, &stream->queued_list);
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
}
static void msm_vb2_buf_finish(struct vb2_buffer *vb)
@@ -104,26 +105,26 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb)
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2_entry, *temp;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
if (!msm_vb2) {
pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__);
- return;
+ return;
}
session = msm_get_session_from_vb2q(vb->vb2_queue);
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -136,7 +137,7 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb)
}
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -145,19 +146,19 @@ static void msm_vb2_stop_stream(struct vb2_queue *q)
struct msm_vb2_buffer *msm_vb2, *temp;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct vb2_v4l2_buffer *vb2_v4l2_buf;
session = msm_get_session_from_vb2q(q);
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(q);
if (!stream) {
pr_err_ratelimited("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -177,7 +178,7 @@ static void msm_vb2_stop_stream(struct vb2_queue *q)
msm_vb2->in_freeq = 0;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
}
int msm_vb2_get_stream_state(struct msm_stream *stream)
@@ -255,17 +256,17 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id,
struct msm_session *session;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return NULL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return NULL;
}
@@ -291,7 +292,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id,
vb2_v4l2_buf = NULL;
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return vb2_v4l2_buf;
}
@@ -302,18 +303,18 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id,
struct msm_session *session;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return NULL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return NULL;
}
@@ -337,7 +338,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id,
vb2_v4l2_buf = NULL;
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return vb2_v4l2_buf;
}
@@ -349,17 +350,17 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
struct msm_vb2_buffer *msm_vb2;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
int rc = 0;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -374,6 +375,8 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n",
vb, session_id, stream_id);
spin_unlock_irqrestore(&stream->stream_lock, flags);
+ read_unlock_irqrestore(&session->stream_rwlock,
+ rl_flags);
return -EINVAL;
}
msm_vb2 =
@@ -390,7 +393,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
rc = -EINVAL;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
@@ -398,7 +401,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
unsigned int stream_id, uint32_t sequence,
struct timeval *ts, uint32_t reserved)
{
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
@@ -409,11 +412,11 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -428,6 +431,8 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n",
session_id, stream_id, vb);
spin_unlock_irqrestore(&stream->stream_lock, flags);
+ read_unlock_irqrestore(&session->stream_rwlock,
+ rl_flags);
return -EINVAL;
}
msm_vb2 =
@@ -448,7 +453,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
rc = -EINVAL;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
@@ -459,18 +464,18 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
struct msm_session *session;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
long rc = -EINVAL;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return rc;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -499,14 +504,14 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
EXPORT_SYMBOL(msm_vb2_return_buf_by_idx);
static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
{
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
@@ -516,11 +521,11 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -532,7 +537,7 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
msm_vb2->in_freeq = 0;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index bc844bb87db5..b1bea12c2cc3 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -1707,6 +1707,10 @@ static long msm_actuator_subdev_do_ioctl(
parg = &actuator_data;
break;
}
+ break;
+ case VIDIOC_MSM_ACTUATOR_CFG:
+ pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd);
+ return -EINVAL;
}
rc = msm_actuator_subdev_ioctl(sd, cmd, parg);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index 7dda92510879..3cb6b55ccc8c 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -403,6 +403,10 @@ static int32_t msm_cci_calc_cmd_len(struct cci_device *cci_dev,
if (cmd->reg_addr + 1 ==
(cmd+1)->reg_addr) {
len += data_len;
+ if (len > cci_dev->payload_size) {
+ len = len - data_len;
+ break;
+ }
*pack += data_len;
} else
break;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index 491b8d31935a..223ddf39dce8 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -152,6 +152,12 @@ static int32_t msm_flash_i2c_write_table(
conf_array.reg_setting = settings->reg_setting_a;
conf_array.size = settings->size;
+ /* Validate the settings size */
+ if ((!conf_array.size) || (conf_array.size > MAX_I2C_REG_SET)) {
+ pr_err("failed: invalid size %d", conf_array.size);
+ return -EINVAL;
+ }
+
return flash_ctrl->flash_i2c_client.i2c_func_tbl->i2c_write_table(
&flash_ctrl->flash_i2c_client, &conf_array);
}
@@ -502,18 +508,42 @@ static int32_t msm_flash_init(
return 0;
}
-#ifdef CONFIG_COMPAT
static int32_t msm_flash_init_prepare(
struct msm_flash_ctrl_t *flash_ctrl,
struct msm_flash_cfg_data_t *flash_data)
{
+#ifdef CONFIG_COMPAT
+ struct msm_flash_cfg_data_t flash_data_k;
+ struct msm_flash_init_info_t flash_init_info;
+ int32_t i = 0;
+
+ if (!is_compat_task()) {
+ /*for 64-bit usecase,it need copy the data to local memory*/
+ flash_data_k.cfg_type = flash_data->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data_k.flash_current[i] =
+ flash_data->flash_current[i];
+ flash_data_k.flash_duration[i] =
+ flash_data->flash_duration[i];
+ }
+
+ flash_data_k.cfg.flash_init_info = &flash_init_info;
+ if (copy_from_user(&flash_init_info,
+ (void __user *)(flash_data->cfg.flash_init_info),
+ sizeof(struct msm_flash_init_info_t))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ return msm_flash_init(flash_ctrl, &flash_data_k);
+ }
+ /*
+ * for 32-bit usecase,it already copy the userspace
+ * data to local memory in msm_flash_subdev_do_ioctl()
+ * so here do not need copy from user
+ */
return msm_flash_init(flash_ctrl, flash_data);
-}
#else
-static int32_t msm_flash_init_prepare(
- struct msm_flash_ctrl_t *flash_ctrl,
- struct msm_flash_cfg_data_t *flash_data)
-{
struct msm_flash_cfg_data_t flash_data_k;
struct msm_flash_init_info_t flash_init_info;
int32_t i = 0;
@@ -528,15 +558,15 @@ static int32_t msm_flash_init_prepare(
flash_data_k.cfg.flash_init_info = &flash_init_info;
if (copy_from_user(&flash_init_info,
- (void *)(flash_data->cfg.flash_init_info),
+ (void __user *)(flash_data->cfg.flash_init_info),
sizeof(struct msm_flash_init_info_t))) {
pr_err("%s copy_from_user failed %d\n",
__func__, __LINE__);
return -EFAULT;
}
return msm_flash_init(flash_ctrl, &flash_data_k);
-}
#endif
+}
static int32_t msm_flash_prepare(
struct msm_flash_ctrl_t *flash_ctrl)
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
index 3f079fe2c173..457bd1730232 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -575,6 +575,8 @@ int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
ps[i].seq_val = SENSOR_GPIO_CUSTOM1;
else if (!strcmp(seq_name, "sensor_gpio_custom2"))
ps[i].seq_val = SENSOR_GPIO_CUSTOM2;
+ else if (!strcmp(seq_name, "sensor_gpio_custom3"))
+ ps[i].seq_val = SENSOR_GPIO_CUSTOM3;
else
rc = -EILSEQ;
break;
@@ -1078,6 +1080,27 @@ int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
rc = 0;
}
+ rc = of_property_read_u32(of_node, "qcom,gpio-custom3", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-custom3 failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-custom3 invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM3] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_CUSTOM3] = 1;
+ CDBG("%s qcom,gpio-custom3 %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM3]);
+ } else {
+ rc = 0;
+ }
+
return rc;
ERROR:
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index acb697946e18..10f72a2155db 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -814,6 +814,9 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
if (bw > 0xFF)
bw = 0xFF;
+ else if (bw == 0)
+ bw = 1;
+
SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
BIT(31) | bw);
SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 28a4006b480c..0f6389370643 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -1491,6 +1491,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
rc = -EINVAL;
break;
}
+ msm_dcvs_try_enable(inst);
/* Pretend as if FW itself is asking for
* additional buffers.
@@ -1570,9 +1571,10 @@ exit:
return rc;
}
-static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
+static int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
{
int rc = 0;
+ struct msm_vidc_list *buf_list = &inst->scratchbufs;
struct {
enum hal_buffer type;
struct hal_buffer_requirements *req;
@@ -1580,13 +1582,17 @@ static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
} internal_buffers[] = {
{ HAL_BUFFER_INTERNAL_SCRATCH, NULL, 0},
{ HAL_BUFFER_INTERNAL_SCRATCH_1, NULL, 0},
- { HAL_BUFFER_INTERNAL_SCRATCH_2, NULL, 0},
- { HAL_BUFFER_INTERNAL_PERSIST, NULL, 0},
- { HAL_BUFFER_INTERNAL_PERSIST_1, NULL, 0},
};
struct hal_frame_size frame_sz;
int i;
+ mutex_lock(&buf_list->lock);
+ if (!list_empty(&buf_list->list)) {
+ dprintk(VIDC_DBG, "Scratch list already has allocated buf\n");
+ mutex_unlock(&buf_list->lock);
+ return 0;
+ }
+ mutex_unlock(&buf_list->lock);
frame_sz.buffer_type = HAL_BUFFER_INPUT;
frame_sz.width = inst->capability.width.max;
@@ -1612,6 +1618,15 @@ static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
get_buff_req_buffer(inst, internal_buffers[i].type);
internal_buffers[i].size = internal_buffers[i].req ?
internal_buffers[i].req->buffer_size : 0;
+
+ rc = allocate_and_set_internal_bufs(inst,
+ internal_buffers[i].req,
+ &inst->scratchbufs, false);
+ if (rc)
+ goto alloc_fail;
+ dprintk(VIDC_DBG,
+ "Allocated scratch type : %d size to : %zd\n",
+ internal_buffers[i].type, internal_buffers[i].size);
}
frame_sz.buffer_type = HAL_BUFFER_INPUT;
@@ -1624,25 +1639,18 @@ static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR,
"%s Failed to get back old buf req, %d\n",
__func__, rc);
- return rc;
+ goto alloc_fail;
}
-
dprintk(VIDC_DBG,
"Old buffer reqs, buffer type = %d width = %d, height = %d\n",
frame_sz.buffer_type, frame_sz.width,
frame_sz.height);
- for (i = 0; i < ARRAY_SIZE(internal_buffers); i++) {
- if (internal_buffers[i].req) {
- internal_buffers[i].req->buffer_size =
- internal_buffers[i].size;
- dprintk(VIDC_DBG,
- "Changing buffer type : %d size to : %zd\n",
- internal_buffers[i].type,
- internal_buffers[i].size);
- }
- }
return 0;
+
+alloc_fail:
+ msm_comm_release_scratch_buffers(inst, false);
+ return rc;
}
static inline int start_streaming(struct msm_vidc_inst *inst)
@@ -1653,6 +1661,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
struct hal_buffer_size_minimum b;
unsigned int buffer_size;
struct msm_vidc_format *fmt = NULL;
+ bool max_internal_buf = false;
fmt = &inst->fmts[CAPTURE_PORT];
buffer_size = fmt->get_frame_size(0,
@@ -1676,8 +1685,9 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR, "H/w scaling is not in valid range\n");
return -EINVAL;
}
- if ((inst->flags & VIDC_SECURE) && !inst->in_reconfig &&
- !slave_side_cp) {
+ max_internal_buf = (inst->flags & VIDC_SECURE) && !slave_side_cp
+ && (inst->session_type == MSM_VIDC_DECODER);
+ if (max_internal_buf) {
rc = set_max_internal_buffers_size(inst);
if (rc) {
dprintk(VIDC_ERR,
@@ -1686,7 +1696,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
goto fail_start;
}
}
- rc = msm_comm_set_scratch_buffers(inst);
+ rc = msm_comm_set_scratch_buffers(inst, max_internal_buf);
if (rc) {
dprintk(VIDC_ERR,
"Failed to set scratch buffers: %d\n", rc);
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index c5816511e056..e4698e0cdcd8 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1868,7 +1868,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
"Failed to get Buffer Requirements : %d\n", rc);
goto fail_start;
}
- rc = msm_comm_set_scratch_buffers(inst);
+ rc = msm_comm_set_scratch_buffers(inst, false);
if (rc) {
dprintk(VIDC_ERR, "Failed to set scratch buffers: %d\n", rc);
goto fail_start;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 7caf61cb6799..de4705c3d2eb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -550,7 +550,7 @@ static int __map_and_update_binfo(struct msm_vidc_inst *inst,
binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
get_hal_buffer_type(inst, b));
if (!binfo->handle[i])
- rc = -EINVAL;
+ return -EINVAL;
binfo->mapped[i] = true;
binfo->device_addr[i] = binfo->handle[i]->device_addr +
@@ -670,13 +670,13 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
rc = __map_and_update_binfo(inst, binfo, b, i);
if (rc)
- goto exit;
+ goto map_err;
/* We maintain one ref count for all planes*/
if (!i && is_dynamic_output_buffer_mode(b, inst)) {
rc = buf_ref_get(inst, binfo);
if (rc < 0)
- goto exit;
+ goto map_err;
}
dprintk(VIDC_DBG,
"%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
@@ -690,10 +690,14 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
mutex_unlock(&inst->registeredbufs.lock);
return 0;
+map_err:
+ if (binfo->handle[0] && binfo->mapped[0])
+ msm_comm_smem_free(inst, binfo->handle[0]);
exit:
kfree(binfo);
return rc;
}
+
int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
struct buffer_info *binfo)
{
@@ -822,7 +826,8 @@ int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
if (inst->session_type == MSM_VIDC_ENCODER &&
!i)
- size = b->m.planes[i].bytesused;
+ size = b->m.planes[i].bytesused +
+ b->m.planes[i].data_offset;
else
size = -1;
@@ -1056,7 +1061,8 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
if (binfo->handle[i] &&
(b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
if (inst->session_type == MSM_VIDC_DECODER && !i)
- size = b->m.planes[i].bytesused;
+ size = b->m.planes[i].bytesused +
+ b->m.planes[i].data_offset;
else
size = -1;
rc = msm_comm_smem_cache_operations(inst,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index f85fe6fd3867..1d910f4b235c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -3342,9 +3342,9 @@ static bool reuse_internal_buffers(struct msm_vidc_inst *inst,
return reused;
}
-static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
+int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
struct hal_buffer_requirements *internal_bufreq,
- struct msm_vidc_list *buf_list)
+ struct msm_vidc_list *buf_list, bool set_on_fw)
{
struct msm_smem *handle;
struct internal_buf *binfo;
@@ -3381,11 +3381,13 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
binfo->handle = handle;
binfo->buffer_type = internal_bufreq->buffer_type;
- rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type,
- handle, false);
- if (rc)
- goto fail_set_buffers;
-
+ if (set_on_fw) {
+ rc = set_internal_buf_on_fw(inst,
+ internal_bufreq->buffer_type,
+ handle, false);
+ if (rc)
+ goto fail_set_buffers;
+ }
mutex_lock(&buf_list->lock);
list_add_tail(&binfo->list, &buf_list->list);
mutex_unlock(&buf_list->lock);
@@ -3426,7 +3428,7 @@ static int set_internal_buffers(struct msm_vidc_inst *inst,
return 0;
return allocate_and_set_internal_bufs(inst, internal_buf,
- buf_list);
+ buf_list, true);
}
int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
@@ -3587,39 +3589,6 @@ int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd)
"Failed to flush buffers: %d\n", rc);
}
break;
- case V4L2_DEC_QCOM_CMD_RECONFIG_HINT:
- {
- u32 *ptr = NULL;
- struct hal_buffer_requirements *output_buf;
-
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc) {
- dprintk(VIDC_ERR,
- "Getting buffer requirements failed: %d\n",
- rc);
- break;
- }
-
- output_buf = get_buff_req_buffer(inst,
- msm_comm_get_hal_output_buffer(inst));
- if (output_buf) {
- if (dec) {
- ptr = (u32 *)dec->raw.data;
- ptr[0] = output_buf->buffer_size;
- ptr[1] = output_buf->buffer_count_actual;
- dprintk(VIDC_DBG,
- "Reconfig hint, size is %u, count is %u\n",
- ptr[0], ptr[1]);
- } else {
- dprintk(VIDC_ERR, "Null decoder\n");
- }
- } else {
- dprintk(VIDC_DBG,
- "This output buffer not required, buffer_type: %x\n",
- HAL_BUFFER_OUTPUT);
- }
- break;
- }
default:
dprintk(VIDC_ERR, "Unknown Command %d\n", which_cmd);
rc = -ENOTSUPP;
@@ -4453,15 +4422,15 @@ error:
return rc;
}
-int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst)
-{
+int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst,
+ bool max_int_buffer) {
int rc = 0;
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
return -EINVAL;
}
- if (msm_comm_release_scratch_buffers(inst, true))
+ if (!max_int_buffer && msm_comm_release_scratch_buffers(inst, true))
dprintk(VIDC_WARN, "Failed to release scratch buffers\n");
rc = set_internal_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 7d28859c040c..5658df95db26 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -41,9 +41,13 @@ int msm_comm_try_set_prop(struct msm_vidc_inst *inst,
enum hal_property ptype, void *pdata);
int msm_comm_try_get_prop(struct msm_vidc_inst *inst,
enum hal_property ptype, union hal_get_property *hprop);
-int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst);
+int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst,
+ bool max_int_buffer);
int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst);
int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
+int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
+ struct hal_buffer_requirements *internal_bufreq,
+ struct msm_vidc_list *buf_list, bool set_on_fw);
int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb);
void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 3a329d989918..a1b4ad48b054 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -106,6 +106,7 @@ struct clock_info {
u32 count;
bool has_scaling;
bool has_mem_retention;
+ unsigned long rate_on_enable;
};
struct clock_set {
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 52b56f615da9..20f02ce46029 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1574,6 +1574,7 @@ static int __scale_clocks(struct venus_hfi_device *device,
return rc;
}
+
static int venus_hfi_scale_clocks(void *dev, int load,
struct vidc_clk_scale_data *data,
unsigned long instant_bitrate)
@@ -1600,6 +1601,41 @@ exit:
return rc;
}
+static void __save_clock_rate(struct venus_hfi_device *device, bool reset)
+{
+ struct clock_info *cl;
+
+ venus_hfi_for_each_clock(device, cl) {
+ if (cl->has_scaling) {
+ cl->rate_on_enable =
+ reset ? 0 : clk_get_rate(cl->clk);
+ dprintk(VIDC_PROF, "Saved clock %s rate %lu\n",
+ cl->name, cl->rate_on_enable);
+ }
+ }
+}
+
+static void __restore_clock_rate(struct venus_hfi_device *device)
+{
+ struct clock_info *cl;
+
+ venus_hfi_for_each_clock(device, cl) {
+ if (cl->has_scaling && cl->rate_on_enable) {
+ int rc;
+
+ rc = __set_clk_rate(device, cl, cl->rate_on_enable);
+ if (rc)
+ dprintk(VIDC_ERR,
+ "Failed to restore clock %s rate %lu\n",
+ cl->name, cl->rate_on_enable);
+ else
+ dprintk(VIDC_DBG,
+ "Restored clock %s rate %lu\n",
+ cl->name, cl->rate_on_enable);
+ }
+ }
+}
+
/* Writes into cmdq without raising an interrupt */
static int __iface_cmdq_write_relaxed(struct venus_hfi_device *device,
void *pkt, bool *requires_interrupt)
@@ -4316,6 +4352,7 @@ static inline int __suspend(struct venus_hfi_device *device)
goto err_tzbsp_suspend;
}
+ __save_clock_rate(device, false);
__venus_power_off(device, true);
dprintk(VIDC_PROF, "Venus power collapsed\n");
return rc;
@@ -4345,6 +4382,7 @@ static inline int __resume(struct venus_hfi_device *device)
dprintk(VIDC_ERR, "Failed to power on venus\n");
goto err_venus_power_on;
}
+ __restore_clock_rate(device);
/* Reboot the firmware */
rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
@@ -4382,6 +4420,7 @@ exit:
err_reset_core:
__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
err_set_video_state:
+ __save_clock_rate(device, true);
__venus_power_off(device, true);
err_venus_power_on:
dprintk(VIDC_ERR, "Failed to resume from power collapse\n");
@@ -4440,6 +4479,7 @@ fail_protect_mem:
subsystem_put(device->resources.fw.cookie);
device->resources.fw.cookie = NULL;
fail_load_fw:
+ __save_clock_rate(device, true);
__venus_power_off(device, true);
fail_venus_power_on:
fail_init_pkt:
@@ -4461,6 +4501,7 @@ static void __unload_fw(struct venus_hfi_device *device)
__vote_buses(device, NULL, 0);
subsystem_put(device->resources.fw.cookie);
__interface_queues_release(device);
+ __save_clock_rate(device, true);
__venus_power_off(device, false);
device->resources.fw.cookie = NULL;
__deinit_resources(device);
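The venus_hfi.c additions record each scaling clock's current rate just before the core is powered off (__save_clock_rate) and program that rate back after the next power-on (__restore_clock_rate), so a power-collapse/resume cycle does not silently leave the clocks at their default rate. A hedged sketch of that pairing using the common clock framework, with an invented wrapper struct (not the driver's clock_info):

/* Illustration only: remember a clock's rate across a power cycle. */
struct saved_clk {
	struct clk *clk;
	unsigned long rate_on_enable;	/* 0 means nothing was saved */
};

static void save_rate(struct saved_clk *s, bool reset)
{
	s->rate_on_enable = reset ? 0 : clk_get_rate(s->clk);
}

static int restore_rate(struct saved_clk *s)
{
	if (!s->rate_on_enable)
		return 0;		/* no saved rate: leave the clock alone */
	return clk_set_rate(s->clk, s->rate_on_enable);
}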
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7cdcd69cecf4..7bc5b5ad1122 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -281,6 +281,7 @@ struct qseecom_control {
wait_queue_head_t app_block_wq;
atomic_t qseecom_state;
int is_apps_region_protected;
+ bool smcinvoke_support;
};
struct qseecom_sec_buf_fd_info {
@@ -578,10 +579,12 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
desc.args[1] = req_64bit->sb_ptr;
desc.args[2] = req_64bit->sb_len;
}
+ qseecom.smcinvoke_support = true;
smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
if (ret) {
+ qseecom.smcinvoke_support = false;
smc_id = TZ_OS_REGISTER_LISTENER_ID;
__qseecom_reentrancy_check_if_no_app_blocked(
smc_id);
@@ -1006,10 +1009,14 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
struct qseecom_continue_blocked_request_ireq *req =
(struct qseecom_continue_blocked_request_ireq *)
req_buf;
- smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+ if (qseecom.smcinvoke_support)
+ smc_id =
+ TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
+ else
+ smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
desc.arginfo =
TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
- desc.args[0] = req->app_id;
+ desc.args[0] = req->app_or_session_id;
ret = scm_call2(smc_id, &desc);
break;
}
@@ -1839,7 +1846,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
return ret;
}
-int __qseecom_process_reentrancy_blocked_on_listener(
+static int __qseecom_process_blocked_on_listener_legacy(
struct qseecom_command_scm_resp *resp,
struct qseecom_registered_app_list *ptr_app,
struct qseecom_dev_handle *data)
@@ -1848,9 +1855,8 @@ int __qseecom_process_reentrancy_blocked_on_listener(
int ret = 0;
struct qseecom_continue_blocked_request_ireq ireq;
struct qseecom_command_scm_resp continue_resp;
- sigset_t new_sigset, old_sigset;
- unsigned long flags;
bool found_app = false;
+ unsigned long flags;
if (!resp || !data) {
pr_err("invalid resp or data pointer\n");
@@ -1890,32 +1896,30 @@ int __qseecom_process_reentrancy_blocked_on_listener(
pr_debug("lsntr %d in_use = %d\n",
resp->data, list_ptr->listener_in_use);
ptr_app->blocked_on_listener_id = resp->data;
+
/* sleep until listener is available */
- do {
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- sigfillset(&new_sigset);
- sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
- mutex_unlock(&app_access_lock);
- do {
- if (!wait_event_freezable(
- list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- break;
- }
- } while (1);
- mutex_lock(&app_access_lock);
- sigprocmask(SIG_SETMASK, &old_sigset, NULL);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
- } while (list_ptr->listener_in_use == true);
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ pr_err("Interrupted: listener_id %d, app_id %d\n",
+ resp->data, ptr_app->app_id);
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+
ptr_app->blocked_on_listener_id = 0;
/* notify the blocked app that listener is available */
pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
resp->data, data->client.app_id,
data->client.app_name);
ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
- ireq.app_id = data->client.app_id;
+ ireq.app_or_session_id = data->client.app_id;
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
&ireq, sizeof(ireq),
&continue_resp, sizeof(continue_resp));
@@ -1934,6 +1938,73 @@ exit:
return ret;
}
+static int __qseecom_process_blocked_on_listener_smcinvoke(
+ struct qseecom_command_scm_resp *resp)
+{
+ struct qseecom_registered_listener_list *list_ptr;
+ int ret = 0;
+ struct qseecom_continue_blocked_request_ireq ireq;
+ struct qseecom_command_scm_resp continue_resp;
+ unsigned int session_id;
+
+ if (!resp) {
+ pr_err("invalid resp pointer\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ session_id = resp->resp_type;
+ list_ptr = __qseecom_find_svc(resp->data);
+ if (!list_ptr) {
+ pr_err("Invalid listener ID\n");
+ ret = -ENODATA;
+ goto exit;
+ }
+ pr_debug("lsntr %d in_use = %d\n",
+ resp->data, list_ptr->listener_in_use);
+ /* sleep until listener is available */
+ qseecom.app_block_ref_cnt++;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ pr_err("Interrupted: listener_id %d, session_id %d\n",
+ resp->data, session_id);
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ qseecom.app_block_ref_cnt--;
+
+ /* notify TZ that listener is available */
+ pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
+ resp->data, session_id);
+ ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+ ireq.app_or_session_id = session_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ if (ret) {
+ pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
+ session_id, ret);
+ goto exit;
+ }
+ resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+ return ret;
+}
+
+static int __qseecom_process_reentrancy_blocked_on_listener(
+ struct qseecom_command_scm_resp *resp,
+ struct qseecom_registered_app_list *ptr_app,
+ struct qseecom_dev_handle *data)
+{
+ if (!qseecom.smcinvoke_support)
+ return __qseecom_process_blocked_on_listener_legacy(
+ resp, ptr_app, data);
+ else
+ return __qseecom_process_blocked_on_listener_smcinvoke(
+ resp);
+}
static int __qseecom_reentrancy_process_incomplete_cmd(
struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
@@ -4699,18 +4770,15 @@ int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
}
resp.result = desc->ret[0]; /*req_cmd*/
- resp.resp_type = desc->ret[1]; /*app_id*/
+ resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
resp.data = desc->ret[2]; /*listener_id*/
- dummy_private_data.client.app_id = desc->ret[1];
- dummy_app_entry.app_id = desc->ret[1];
-
mutex_lock(&app_access_lock);
ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
&dummy_private_data);
mutex_unlock(&app_access_lock);
if (ret)
- pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
+ pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
(int)desc->ret[0], (int)desc->ret[2],
(int)desc->ret[1], ret);
desc->ret[0] = resp.result;
@@ -8462,7 +8530,11 @@ static int qseecom_probe(struct platform_device *pdev)
qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
pr_err("Ion client cannot be created\n");
- rc = -ENOMEM;
+
+ if (qseecom.ion_clnt != ERR_PTR(-EPROBE_DEFER))
+ rc = -ENOMEM;
+ else
+ rc = -EPROBE_DEFER;
goto exit_del_cdev;
}
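The qseecom listener change above discovers SMC-invoke support by optimistic fallback: the SMCINVOKE registration SMC ID is tried first and, only if that call fails, the code drops back to the legacy ID and clears smcinvoke_support, so later blocked-listener handling can pick the matching continue-request ID. A hedged sketch of that detect-by-fallback pattern follows; every identifier below is illustrative, not the qseecom API:

    /* Detect-by-fallback: try the newer command first and remember whether
     * it worked, so later calls use the matching variant. Made-up names. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool smcinvoke_support;

    static int issue_cmd(int cmd_id)
    {
        /* Pretend only the legacy command (id 1) is implemented. */
        return cmd_id == 1 ? 0 : -1;
    }

    static int register_listener(void)
    {
        smcinvoke_support = true;
        if (issue_cmd(2) == 0)          /* new SMCINVOKE variant */
            return 0;
        smcinvoke_support = false;      /* fall back to the legacy variant */
        return issue_cmd(1);
    }

    static int continue_blocked_request(void)
    {
        return issue_cmd(smcinvoke_support ? 2 : 1);
    }

    int main(void)
    {
        printf("register: %d, continue: %d, smcinvoke: %d\n",
               register_listener(), continue_blocked_request(),
               (int)smcinvoke_support);
        return 0;
    }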
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 79b5b3504ccd..0da9c5caea13 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -487,12 +487,17 @@ static int mmc_devfreq_set_target(struct device *dev,
struct mmc_devfeq_clk_scaling *clk_scaling;
int err = 0;
int abort;
+ unsigned long pflags = current->flags;
+
+ /* Ensure scaling would happen even in memory pressure conditions */
+ current->flags |= PF_MEMALLOC;
if (!(host && freq)) {
pr_err("%s: unexpected host/freq parameter\n", __func__);
err = -EINVAL;
goto out;
}
+
clk_scaling = &host->clk_scaling;
if (!clk_scaling->enable)
@@ -551,6 +556,7 @@ static int mmc_devfreq_set_target(struct device *dev,
rel_host:
mmc_release_host(host);
out:
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
return err;
}
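tsk_restore_flags() puts back only the bits named in its mask, so the hunk above can set PF_MEMALLOC for the duration of the scaling work and afterwards restore just that one bit without clobbering any other task flags that changed in between. A small sketch of the masked restore, with plain integers standing in for task->flags (the PF_MEMALLOC value is reused only for illustration):

    /* Masked flag save/restore, in the spirit of tsk_restore_flags():
     * only the bits in 'mask' are restored from the saved copy. */
    #include <stdio.h>

    #define PF_MEMALLOC 0x0800   /* illustrative bit value for the sketch */

    static unsigned long restore_flags(unsigned long cur, unsigned long saved,
                                       unsigned long mask)
    {
        return (cur & ~mask) | (saved & mask);
    }

    int main(void)
    {
        unsigned long flags = 0x0040;          /* PF_MEMALLOC not set */
        unsigned long saved = flags;

        flags |= PF_MEMALLOC;                  /* allow allocs under pressure */
        flags |= 0x0001;                       /* unrelated bit changes meanwhile */
        flags = restore_flags(flags, saved, PF_MEMALLOC);

        printf("PF_MEMALLOC set: %d, unrelated bit kept: %d\n",
               !!(flags & PF_MEMALLOC), !!(flags & 0x0001));
        return 0;
    }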
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7173bf56c2c5..a28d6b98a042 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1323,10 +1323,6 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
send_status = false;
- /* Reduce frequency to HS */
- max_dtr = card->ext_csd.hs_max_dtr;
- mmc_set_clock(host, max_dtr);
-
/* Switch HS400 to HS DDR */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
@@ -1337,6 +1333,10 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+ /* Reduce frequency to HS */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
if (!send_status) {
err = mmc_switch_status(card, false);
if (err)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 7aefeb037ef4..907763ddf234 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1139,6 +1139,7 @@ int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
bool drv_type_changed = false;
struct mmc_card *card = host->mmc->card;
int sts_retry;
+ u8 last_good_phase = 0;
/*
* Tuning is required for SDR104, HS200 and HS400 cards and
@@ -1224,6 +1225,22 @@ retry:
mmc_wait_for_req(mmc, &mrq);
if (card && (cmd.error || data.error)) {
+ /*
+ * Set the dll to last known good phase while sending
+ * status command to ensure that status command won't
+ * fail due to bad phase.
+ */
+ if (tuned_phase_cnt)
+ last_good_phase =
+ tuned_phases[tuned_phase_cnt-1];
+ else if (msm_host->saved_tuning_phase !=
+ INVALID_TUNING_PHASE)
+ last_good_phase = msm_host->saved_tuning_phase;
+
+ rc = msm_config_cm_dll_phase(host, last_good_phase);
+ if (rc)
+ goto kfree;
+
sts_cmd.opcode = MMC_SEND_STATUS;
sts_cmd.arg = card->rca << 16;
sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -4162,8 +4179,10 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
/* keep track of the value in SDHCI_CAPABILITIES */
msm_host->caps_0 = caps;
- if ((major == 1) && (minor >= 0x6b))
+ if ((major == 1) && (minor >= 0x6b)) {
msm_host->ice_hci_support = true;
+ host->cdr_support = true;
+ }
}
#ifdef CONFIG_MMC_CQ_HCI
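The tuning hunk above chooses a fallback DLL phase for the status command: the most recently tuned phase from the current run if there is one, otherwise a previously saved phase when it is valid. A tiny sketch of that selection order; the names and the INVALID marker are placeholders, not the sdhci-msm API:

    /* Fallback selection for a "last known good" value: prefer results from
     * the current run, then a previously saved value. Placeholder names. */
    #include <stdio.h>

    #define INVALID_PHASE -1

    static int pick_last_good_phase(const int *tuned, int tuned_cnt, int saved)
    {
        if (tuned_cnt)
            return tuned[tuned_cnt - 1];
        if (saved != INVALID_PHASE)
            return saved;
        return 0;                 /* nothing known yet, fall back to phase 0 */
    }

    int main(void)
    {
        int tuned[] = { 3, 4, 5 };

        printf("%d\n", pick_last_good_phase(tuned, 3, INVALID_PHASE)); /* 5 */
        printf("%d\n", pick_last_good_phase(tuned, 0, 7));             /* 7 */
        printf("%d\n", pick_last_good_phase(tuned, 0, INVALID_PHASE)); /* 0 */
        return 0;
    }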
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a5ff9f73dfbc..0033fea0a800 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3633,7 +3633,7 @@ static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
ctrl |= SDHCI_CTRL_ADMA32;
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
- if (host->ops->toggle_cdr)
+ if (host->ops->toggle_cdr && !host->cdr_support)
host->ops->toggle_cdr(host, false);
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 93129b26dc5e..300be7fd0f24 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -564,6 +564,7 @@ struct sdhci_host {
bool runtime_suspended; /* Host is runtime suspended */
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
+ bool cdr_support;
struct mmc_request *mrq; /* Current request */
struct mmc_command *cmd; /* Current command */
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 5abab8800891..9190057535e6 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
{
uint32_t buf;
size_t bytes_read;
+ int err;
- if (mtd_read(master, offset, sizeof(buf), &bytes_read,
- (uint8_t *)&buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
goto out_default;
}
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
int trx_part = -1;
int last_trx_part = -1;
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ int err;
/*
* Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
- /* Nothing more in higher memory */
- if (offset >= 0x2000000)
+ /* Nothing more in higher memory on BCM47XX (MIPS) */
+ if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -252,10 +256,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read middle of the block */
- if (mtd_read(master, offset + 0x8000, 0x4,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -275,10 +280,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
offset = master->size - possible_nvram_sizes[i];
- if (mtd_read(master, offset, 0x4, &bytes_read,
- (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while reading at offset 0x%X!\n",
- offset);
+ err = mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+ offset, err);
continue;
}
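The bcm47xxpart hunks change the read error handling so that corrected bitflips reported by mtd_read() (as classified by mtd_is_bitflip()) are no longer treated as failures; only genuine errors are logged and the block skipped. A hedged sketch of that classification, with made-up error codes in place of the kernel's real -EUCLEAN handling:

    /* Treat "corrected bitflip" status as success, everything else as an
     * error worth reporting. The error codes below are stand-ins. */
    #include <stdio.h>

    #define EOK           0
    #define EUCLEAN_LIKE  117   /* corrected bitflips, data still valid */
    #define EIO_LIKE        5   /* uncorrectable read failure */

    static int is_bitflip(int err)   { return err == EUCLEAN_LIKE; }

    static int check_read(int err)
    {
        if (err && !is_bitflip(err)) {
            printf("read failed: %d\n", err);
            return -1;              /* caller skips this block */
        }
        return 0;                   /* data usable, possibly after correction */
    }

    int main(void)
    {
        printf("%d %d %d\n", check_read(EOK), check_read(EUCLEAN_LIKE),
               check_read(EIO_LIKE));
        return 0;
    }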
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 37e4135ab213..64d6f053c2a5 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1057,6 +1057,13 @@ static int spansion_quad_enable(struct spi_nor *nor)
return -EINVAL;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
/* read back and check it */
ret = read_cr(nor);
if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 5e6238e0b2bd..75e6e7e6baed 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2732,8 +2732,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
+ }
/*
* Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 865b7e0b133b..64034ff081a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_start\n");
- hw_if->init(pdata);
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
ret = phy_if->phy_start(pdata);
if (ret)
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index b56c9c581359..70da30095b89 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -255,15 +255,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
@@ -469,6 +470,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
len -= ETH_FCS_LEN;
skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
+ if (unlikely(!skb)) {
+ bgmac_err(bgmac, "build_skb failed\n");
+ put_page(virt_to_head_page(buf));
+ break;
+ }
skb_put(skb, BGMAC_RX_FRAME_OFFSET +
BGMAC_RX_BUF_OFFSET + len);
skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
@@ -1302,7 +1308,8 @@ static int bgmac_open(struct net_device *net_dev)
phy_start(bgmac->phy_dev);
- netif_carrier_on(net_dev);
+ netif_start_queue(net_dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1795c935ff02..7b8638ddb673 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1052,7 +1052,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
spin_unlock_bh(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 6a061f17a44f..4cd2a7d0124f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2939,7 +2939,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
size, GFAR_RXB_TRUESIZE);
/* try reuse page */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f9e4988ea30e..2f9b12cf9ee5 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1602,8 +1602,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_features = NETIF_F_SG;
+ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ }
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d74f5f4e5782..07eabf72c480 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
+ korina_free_ring(dev);
+
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
+ korina_free_ring(dev);
+
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 71ec9cb08e06..15056f06754a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2446,7 +2446,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
mvneta_port_enable(pp);
/* Enable polling on the port */
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_enable(&port->napi);
@@ -2472,7 +2472,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
phy_stop(pp->phy_dev);
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_disable(&port->napi);
@@ -2902,13 +2902,11 @@ err_cleanup_rxqs:
static int mvneta_stop(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- int cpu;
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
unregister_cpu_notifier(&pp->cpu_notifier);
- for_each_present_cpu(cpu)
- smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 603d1c3d3b2e..ff77b8b608bd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -542,8 +542,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -558,15 +559,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 1e611980cf99..f5c1f4acc57b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -153,8 +153,9 @@ static struct mlx5_profile profile[] = {
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+#define FW_PRE_INIT_TIMEOUT_MILI 10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
@@ -934,6 +935,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
*/
dev->state = MLX5_DEVICE_STATE_UP;
+	/* Wait for the firmware to accept the initialization segment
+	 * configuration.
+	 */
+ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+ FW_PRE_INIT_TIMEOUT_MILI);
+ goto out;
+ }
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 1e61d4da72db..585e90f8341d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -221,18 +221,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
int ring_size;
int i;
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
- }
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
-
- /* Free aligned TX buffers */
- kfree(priv->tx_align[q]);
- priv->tx_align[q] = NULL;
-
if (priv->rx_ring[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -261,6 +249,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
+ /* Free RX skb ringbuffer */
+ if (priv->rx_skb[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++)
+ dev_kfree_skb(priv->rx_skb[q][i]);
+ }
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+ /* Free aligned TX buffers */
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
+
/* Free TX skb ringbuffer.
* SKBs are freed by ravb_tx_free() call above.
*/
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index d790cb8d9db3..8e832ba8ab24 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2796,6 +2796,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
.mcdi_max_ver = -1,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
const struct efx_nic_type falcon_b0_nic_type = {
@@ -2897,4 +2902,9 @@ const struct efx_nic_type falcon_b0_nic_type = {
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index d2701c53ed68..ebec2dceff45 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -822,8 +822,6 @@ static int marvell_read_status(struct phy_device *phydev)
phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
mii_lpa_to_ethtool_lpa_t(lpa);
- lpa &= adv;
-
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index c0b4e65267af..46fe1ae919a3 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the read operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the write operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev)
bus->read = iproc_mdio_read;
bus->write = iproc_mdio_write;
+ iproc_mdio_config_clk(priv->base);
+
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7f7c87762bc6..8dfc75250583 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -47,8 +47,16 @@ module_param(gso, bool, 0444);
*/
DECLARE_EWMA(pkt_len, 1, 64)
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
#define VIRTNET_DRIVER_VERSION "1.0.0"
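The new MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ties the alignment to the page size: buffers are at most one page, so aligning to roughly the square root of PAGE_SIZE leaves exactly enough low address bits to encode the true buffer length. A worked sketch of the arithmetic for a 4 KiB page follows (PAGE_SHIFT = 12 is an assumption here; other page sizes follow the same formula, and the real macro still takes the max with L1_CACHE_BYTES):

    /* Arithmetic behind MERGEABLE_BUFFER_MIN_ALIGN_SHIFT for PAGE_SHIFT = 12:
     * shift = (12 + 1) / 2 = 6, so minimum alignment = 1 << 6 = 64 bytes,
     * leaving 6 low bits free to encode sizes up to 64 * 64 = 4096 bytes. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)

    int main(void)
    {
        unsigned long align = 1UL << MIN_ALIGN_SHIFT;
        unsigned long max_encodable = align * align;   /* covers one full page */

        printf("align = %lu bytes, encodable size range = %lu bytes\n",
               align, max_encodable);
        return 0;
    }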
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 9a986ccd42e5..dab3bf6649e6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2240,7 +2240,7 @@ static void vxlan_cleanup(unsigned long arg)
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state & NUD_PERMANENT)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 2217e0cae4da..7945b1cd6244 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -311,6 +311,14 @@ config CNSS_CRYPTO
from cnss platform driver. This crypto APIs used to generate cipher
key and add support for the WLAN driver module security protocol.
+config CNSS_QCA6290
+ bool "Enable CNSS QCA6290 chipset specific changes"
+ ---help---
+	  This enables WLAN host driver changes that are specific to the
+	  CNSS QCA6290 chipset.
+	  These changes are needed to support the new hardware architecture
+	  of the QCA6290 chipset.
+
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
source "drivers/net/wireless/b43legacy/Kconfig"
@@ -332,6 +340,7 @@ source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cw1200/Kconfig"
source "drivers/net/wireless/rsi/Kconfig"
source "drivers/net/wireless/cnss/Kconfig"
+source "drivers/net/wireless/cnss2/Kconfig"
source "drivers/net/wireless/cnss_genl/Kconfig"
source "drivers/net/wireless/cnss_utils/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 704a976bdfb5..f23a2fbc3afa 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_RSI_91X) += rsi/
obj-$(CONFIG_WCNSS_CORE) += wcnss/
obj-$(CONFIG_CNSS) += cnss/
+obj-$(CONFIG_CNSS2) += cnss2/
obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
obj-$(CONFIG_CNSS_CRYPTO) += cnss_crypto/
obj-$(CONFIG_CNSS_GENL) += cnss_genl/
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 6906bddb229f..10b33840e5e5 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1886,6 +1886,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_wmi_detach;
}
+ /* If firmware indicates Full Rx Reorder support it must be used in a
+ * slightly different manner. Let HTT code know.
+ */
+ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+ ar->wmi.svc_map));
+
status = ath10k_htt_rx_alloc(&ar->htt);
if (status) {
ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
@@ -1997,12 +2003,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
}
}
- /* If firmware indicates Full Rx Reorder support it must be used in a
- * slightly different manner. Let HTT code know.
- */
- ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
- ar->wmi.svc_map));
-
status = ath10k_htt_rx_ring_refill(ar);
if (status) {
ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e430bab869a8..cce931e51d7e 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -667,6 +667,36 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
*def = &conf->def;
}
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ if (QCA_REV_WCN3990(ar)) {
+ time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+ 50 * HZ);
+
+ if (time_left == 0) {
+ ath10k_warn(ar, "Timeout in receiving peer delete response\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ ar->num_peers--;
+
+ return 0;
+}
+
static int ath10k_peer_create(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -711,7 +741,7 @@ static int ath10k_peer_create(struct ath10k *ar,
spin_unlock_bh(&ar->data_lock);
ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
addr, vdev_id);
- ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ ath10k_peer_delete(ar, vdev_id, addr);
return -ENOENT;
}
@@ -779,36 +809,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
-static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
-{
- int ret;
- unsigned long time_left;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
- if (ret)
- return ret;
-
- ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
- if (ret)
- return ret;
-
- if (QCA_REV_WCN3990(ar)) {
- time_left = wait_for_completion_timeout(&ar->peer_delete_done,
- 50 * HZ);
-
- if (time_left == 0) {
- ath10k_warn(ar, "Timeout in receiving peer delete response\n");
- return -ETIMEDOUT;
- }
- }
-
- ar->num_peers--;
-
- return 0;
-}
-
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
@@ -5094,7 +5094,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
err_peer_delete:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
- ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+ ath10k_peer_delete(ar, arvif->vdev_id, vif->addr);
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
@@ -5150,8 +5150,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
- vif->addr);
+ ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
+ vif->addr);
if (ret)
ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
arvif->vdev_id, ret);
@@ -7168,12 +7168,25 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
if (WARN_ON(!arvif->is_up))
continue;
+ if (QCA_REV_WCN3990(ar)) {
+			/* For the wcn3990 WLAN module, send vdev
+			 * restart only; there is no need to send vdev down.
+			 */
- ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret) {
- ath10k_warn(ar, "failed to down vdev %d: %d\n",
- arvif->vdev_id, ret);
- continue;
+ ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ } else {
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
}
}
@@ -7204,11 +7217,17 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
ret);
- ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
- if (ret) {
- ath10k_warn(ar, "failed to restart vdev %d: %d\n",
- arvif->vdev_id, ret);
- continue;
+ if (!QCA_REV_WCN3990(ar)) {
+			/* For WLAN modules other than wcn3990, send both
+			 * vdev down and vdev restart to the firmware.
+			 */
+
+ ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
}
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
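The two mac.c hunks above split the channel-switch sequence by target: wcn3990 is sent a vdev restart only, while other chips keep the original vdev down followed by vdev restart. A compact sketch of that per-target dispatch; the predicate and ops below are placeholders, not the ath10k WMI API:

    /* Per-target dispatch during a channel switch: some hardware wants
     * restart-only, the rest wants down-then-restart. Placeholder names. */
    #include <stdio.h>

    struct vdev_ops {
        int (*down)(int id);
        int (*restart)(int id);
    };

    static int do_down(int id)    { printf("vdev %d down\n", id);    return 0; }
    static int do_restart(int id) { printf("vdev %d restart\n", id); return 0; }

    static int switch_channel(const struct vdev_ops *ops, int id,
                              int restart_only)
    {
        if (!restart_only) {
            int ret = ops->down(id);
            if (ret)
                return ret;
        }
        return ops->restart(id);
    }

    int main(void)
    {
        struct vdev_ops ops = { do_down, do_restart };

        switch_channel(&ops, 0, 1);   /* wcn3990-style: restart only */
        switch_channel(&ops, 1, 0);   /* others: down, then restart */
        return 0;
    }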
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 9406b6f71a6b..75e81991913a 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -107,8 +107,8 @@ static struct ce_attr host_ce_config_wlan[] = {
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
- .src_sz_max = 512,
- .dest_nentries = 512,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
.recv_cb = ath10k_snoc_htt_rx_cb,
},
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 129859255295..c3100fcd80f2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -31,6 +31,8 @@ struct wmi_ops {
struct wmi_mgmt_rx_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
+ int (*pull_peer_delete_resp)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_ev_arg *arg);
int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_vdev_start_ev_arg *arg);
int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
@@ -246,6 +248,16 @@ ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_peer_delete_resp(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_peer_delete_resp)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_peer_delete_resp(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 888ab5ad37f1..07b15f4c1db4 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -412,15 +412,6 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
return 0;
}
-static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
- struct sk_buff *skb)
-{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TLV_PEER_DELETE_RESP_EVENTID\n");
- complete(&ar->peer_delete_done);
-
- return 0;
-}
-
/***********/
/* TLV ops */
/***********/
@@ -657,6 +648,34 @@ static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
return 0;
}
+static int ath10k_wmi_tlv_op_pull_peer_delete_ev(
+ struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_ev_arg *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->peer_addr = ev->peer_addr;
+
+ kfree(tb);
+ return 0;
+}
+
static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg)
@@ -3620,6 +3639,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
+ .pull_peer_delete_resp = ath10k_wmi_tlv_op_pull_peer_delete_ev,
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 86b24c24d9f1..500d1d8f441f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -898,6 +898,7 @@ enum wmi_tlv_tag {
WMI_TLV_TAG_STRUCT_HL_1_0_SVC_OFFSET = 176,
WMI_TLV_TAG_STRUCT_MGMT_TX_CMD = 0x1A6,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT = 0x1C3,
WMI_TLV_TAG_MAX
};
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index d60e7fbb7e74..d6ec0de63582 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2269,6 +2269,24 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
return true;
}
+int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ int ret;
+ struct wmi_peer_delete_resp_ev_arg arg = {};
+
+ ret = ath10k_wmi_pull_peer_delete_resp(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse peer delete resp: %d\n", ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TLV_PEER_DELETE_RESP_EVENTID\n");
+ complete(&ar->peer_delete_done);
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9bd374910379..7ae07a505c59 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6234,6 +6234,11 @@ struct wmi_scan_ev_arg {
__le32 vdev_id;
};
+struct wmi_peer_delete_resp_ev_arg {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_addr;
+};
+
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
@@ -6603,6 +6608,8 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
+ struct sk_buff *skb);
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 63bb7686b811..94861020af12 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -960,7 +960,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
- if (len < sizeof(struct ieee80211_mgmt))
+ if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index b4c4d09011b4..b91bf51be767 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -291,6 +291,8 @@ int wil6210_sysfs_init(struct wil6210_priv *wil)
return err;
}
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
return 0;
}
@@ -299,4 +301,5 @@ void wil6210_sysfs_remove(struct wil6210_priv *wil)
struct device *dev = wil_to_dev(wil);
sysfs_remove_group(&dev->kobj, &wil6210_attribute_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig
index 8946f65df716..6faf9f1ef5d0 100644
--- a/drivers/net/wireless/cnss/Kconfig
+++ b/drivers/net/wireless/cnss/Kconfig
@@ -56,6 +56,9 @@ config CLD_LL_CORE
select WEXT_PRIV
select WEXT_SPY
select WIRELESS_EXT
+ select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
---help---
This section contains the necessary modules needed to enable the
core WLAN driver for Qualcomm QCA6174 chipset.
@@ -73,7 +76,7 @@ config CNSS_SECURE_FW
config BUS_AUTO_SUSPEND
bool "Enable/Disable Runtime PM support for PCIe based WLAN Drivers"
- depends on CNSS
+ depends on CNSS || CNSS2
depends on PCI
---help---
Runtime Power Management is supported for PCIe based WLAN Drivers.
diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig
new file mode 100644
index 000000000000..85d2a7b30a84
--- /dev/null
+++ b/drivers/net/wireless/cnss2/Kconfig
@@ -0,0 +1,17 @@
+config CNSS2
+ tristate "CNSS2 Platform Driver for Wi-Fi Module"
+ depends on !CNSS && PCI_MSM
+ ---help---
+	  This module adds support for the Connectivity Subsystem (CNSS) used
+	  by PCIe based Wi-Fi devices with QCA6174/QCA6290 chipsets.
+	  The driver also integrates the WLAN module with the subsystem
+	  restart framework.
+
+config CNSS2_DEBUG
+ bool "CNSS2 Platform Driver Debug Support"
+ depends on CNSS2
+ ---help---
+	  This option enables CNSS2 platform driver debug support, which
+	  primarily includes additional verbose logging for certain
+	  features, kernel panics in certain cases to aid debugging,
+	  and other debug mechanisms.
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
new file mode 100644
index 000000000000..9d383c8daa43
--- /dev/null
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_CNSS2) += cnss2.o
+
+cnss2-y := main.o
+cnss2-y += debug.o
+cnss2-y += pci.o
+cnss2-y += power.o
+cnss2-y += qmi.o
+cnss2-y += utils.o
+cnss2-y += wlan_firmware_service_v01.o
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
new file mode 100644
index 000000000000..360ab31c61dd
--- /dev/null
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -0,0 +1,174 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include "main.h"
+#include "debug.h"
+
+#define CNSS_IPC_LOG_PAGES 32
+
+void *cnss_ipc_log_context;
+
+static int cnss_pin_connect_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *cnss_priv = s->private;
+
+ seq_puts(s, "Pin connect results\n");
+ seq_printf(s, "FW power pin result: %04x\n",
+ cnss_priv->pin_result.fw_pwr_pin_result);
+ seq_printf(s, "FW PHY IO pin result: %04x\n",
+ cnss_priv->pin_result.fw_phy_io_pin_result);
+ seq_printf(s, "FW RF pin result: %04x\n",
+ cnss_priv->pin_result.fw_rf_pin_result);
+ seq_printf(s, "Host pin result: %04x\n",
+ cnss_priv->pin_result.host_pin_result);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int cnss_pin_connect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_pin_connect_show, inode->i_private);
+}
+
+static const struct file_operations cnss_pin_connect_fops = {
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_pin_connect_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_stats_show_state(struct seq_file *s,
+ struct cnss_plat_data *plat_priv)
+{
+ enum cnss_driver_state i;
+ int skip = 0;
+ unsigned long state;
+
+ seq_printf(s, "\nState: 0x%lx(", plat_priv->driver_state);
+ for (i = 0, state = plat_priv->driver_state; state != 0;
+ state >>= 1, i++) {
+ if (!(state & 0x1))
+ continue;
+
+ if (skip++)
+ seq_puts(s, " | ");
+
+ switch (i) {
+ case CNSS_QMI_WLFW_CONNECTED:
+ seq_puts(s, "QMI_WLFW_CONNECTED");
+ continue;
+ case CNSS_FW_MEM_READY:
+ seq_puts(s, "FW_MEM_READY");
+ continue;
+ case CNSS_FW_READY:
+ seq_puts(s, "FW_READY");
+ continue;
+ case CNSS_COLD_BOOT_CAL:
+ seq_puts(s, "COLD_BOOT_CAL");
+ continue;
+ case CNSS_DRIVER_LOADING:
+ seq_puts(s, "DRIVER_LOADING");
+ continue;
+ case CNSS_DRIVER_UNLOADING:
+ seq_puts(s, "DRIVER_UNLOADING");
+ continue;
+ case CNSS_DRIVER_PROBED:
+ seq_puts(s, "DRIVER_PROBED");
+ continue;
+ case CNSS_DRIVER_RECOVERY:
+ seq_puts(s, "DRIVER_RECOVERY");
+ continue;
+ case CNSS_FW_BOOT_RECOVERY:
+ seq_puts(s, "FW_BOOT_RECOVERY");
+ continue;
+ case CNSS_DEV_ERR_NOTIFY:
+ seq_puts(s, "DEV_ERR");
+ }
+
+ seq_printf(s, "UNKNOWN-%d", i);
+ }
+ seq_puts(s, ")\n");
+
+ return 0;
+}
+
+static int cnss_stats_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *plat_priv = s->private;
+
+ cnss_stats_show_state(s, plat_priv);
+
+ return 0;
+}
+
+static int cnss_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_stats_show, inode->i_private);
+}
+
+static const struct file_operations cnss_stats_fops = {
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_stats_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct dentry *root_dentry;
+
+ root_dentry = debugfs_create_dir("cnss", 0);
+ if (IS_ERR(root_dentry)) {
+ ret = PTR_ERR(root_dentry);
+ cnss_pr_err("Unable to create debugfs %d\n", ret);
+ goto out;
+ }
+ plat_priv->root_dentry = root_dentry;
+ debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv,
+ &cnss_pin_connect_fops);
+ debugfs_create_file("stats", 0644, root_dentry, plat_priv,
+ &cnss_stats_fops);
+out:
+ return ret;
+}
+
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
+{
+ debugfs_remove_recursive(plat_priv->root_dentry);
+}
+
+int cnss_debug_init(void)
+{
+ cnss_ipc_log_context = ipc_log_context_create(CNSS_IPC_LOG_PAGES,
+ "cnss", 0);
+ if (!cnss_ipc_log_context) {
+ cnss_pr_err("Unable to create IPC log context!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void cnss_debug_deinit(void)
+{
+ if (cnss_ipc_log_context) {
+ ipc_log_context_destroy(cnss_ipc_log_context);
+ cnss_ipc_log_context = NULL;
+ }
+}
diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h
new file mode 100644
index 000000000000..1621514eb5b2
--- /dev/null
+++ b/drivers/net/wireless/cnss2/debug.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_DEBUG_H
+#define _CNSS_DEBUG_H
+
+#include <linux/ipc_logging.h>
+#include <linux/printk.h>
+
+extern void *cnss_ipc_log_context;
+
+#define cnss_ipc_log_string(_x...) do { \
+ if (cnss_ipc_log_context) \
+ ipc_log_string(cnss_ipc_log_context, _x); \
+ } while (0)
+
+#define cnss_pr_err(_fmt, ...) do { \
+ pr_err("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("ERR: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define cnss_pr_warn(_fmt, ...) do { \
+ pr_warn("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("WRN: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define cnss_pr_info(_fmt, ...) do { \
+ pr_info("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("INF: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define cnss_pr_dbg(_fmt, ...) do { \
+ pr_debug("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("DBG: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#ifdef CONFIG_CNSS2_DEBUG
+#define CNSS_ASSERT(_condition) do { \
+ if (!(_condition)) { \
+ cnss_pr_err("ASSERT at line %d\n", \
+ __LINE__); \
+ WARN_ON(1); \
+ } \
+ } while (0)
+#else
+#define CNSS_ASSERT(_condition) do { \
+ if (!(_condition)) { \
+ cnss_pr_err("ASSERT at line %d\n", \
+ __LINE__); \
+ WARN_ON(1); \
+ } \
+ } while (0)
+#endif
+
+int cnss_debug_init(void);
+void cnss_debug_deinit(void);
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv);
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv);
+
+#endif /* _CNSS_DEBUG_H */
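The cnss_pr_* macros above mirror every message into both the kernel log and the CNSS IPC log buffer when one has been created, so driver history can survive after the console ring has wrapped. A userspace analogue of such a dual-sink logging macro, using stderr plus an optional file (nothing here is the cnss API):

    /* Dual-sink logging macro in the spirit of cnss_pr_err(): every message
     * goes to stderr and, if a log file is open, to that file as well. */
    #include <stdio.h>

    static FILE *log_file;

    #define dual_log(fmt, ...) do {                                   \
            fprintf(stderr, "cnss: " fmt, ##__VA_ARGS__);             \
            if (log_file)                                             \
                fprintf(log_file, "cnss: " fmt, ##__VA_ARGS__);       \
        } while (0)

    int main(void)
    {
        log_file = fopen("/tmp/cnss.log", "w");
        dual_log("probe status: %d\n", 0);
        if (log_file)
            fclose(log_file);
        return 0;
    }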
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
new file mode 100644
index 000000000000..e114d0c51a07
--- /dev/null
+++ b/drivers/net/wireless/cnss2/main.c
@@ -0,0 +1,2360 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/rwsem.h>
+#include <linux/suspend.h>
+#include <linux/timer.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#include "main.h"
+#include "debug.h"
+#include "pci.h"
+
+#define CNSS_DUMP_FORMAT_VER 0x11
+#define CNSS_DUMP_FORMAT_VER_V2 0x22
+#define CNSS_DUMP_MAGIC_VER_V2 0x42445953
+#define CNSS_DUMP_NAME "CNSS_WLAN"
+#define CNSS_DUMP_DESC_SIZE 0x1000
+#define CNSS_DUMP_SEG_VER 0x1
+#define WLAN_RECOVERY_DELAY 1000
+#define FILE_SYSTEM_READY 1
+#define FW_READY_TIMEOUT 20000
+#define FW_ASSERT_TIMEOUT 5000
+#define CNSS_EVENT_PENDING 2989
+#define WAKE_MSI_NAME "WAKE"
+
+static struct cnss_plat_data *plat_env;
+
+static DECLARE_RWSEM(cnss_pm_sem);
+
+static bool qmi_bypass;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(qmi_bypass, bool, 0600);
+MODULE_PARM_DESC(qmi_bypass, "Bypass QMI from platform driver");
+#endif
+
+static bool enable_waltest;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(enable_waltest, bool, 0600);
+MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
+#endif
+
+enum cnss_debug_quirks {
+ LINK_DOWN_SELF_RECOVERY,
+};
+
+unsigned long quirks;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(quirks, ulong, 0600);
+MODULE_PARM_DESC(quirks, "Debug quirks for the driver");
+#endif
+
+static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
+ "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
+ "utfbd30.bin", "epping30.bin", "evicted30.bin"
+};
+
+static struct cnss_fw_files FW_FILES_DEFAULT = {
+ "qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
+ "utfbd.bin", "epping.bin", "evicted.bin"
+};
+
+struct cnss_driver_event {
+ struct list_head list;
+ enum cnss_driver_event_type type;
+ bool sync;
+ struct completion complete;
+ int ret;
+ void *data;
+};
+
+static enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
+{
+ if (!dev)
+ return CNSS_BUS_NONE;
+
+ if (!dev->bus)
+ return CNSS_BUS_NONE;
+
+ if (memcmp(dev->bus->name, "pci", 3) == 0)
+ return CNSS_BUS_PCI;
+ else
+ return CNSS_BUS_NONE;
+}
+
+static void cnss_set_plat_priv(struct platform_device *plat_dev,
+ struct cnss_plat_data *plat_priv)
+{
+ plat_env = plat_priv;
+}
+
+static struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
+ *plat_dev)
+{
+ return plat_env;
+}
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev)
+{
+ if (!dev)
+ return NULL;
+
+ switch (cnss_get_dev_bus_type(dev)) {
+ case CNSS_BUS_PCI:
+ return cnss_get_pci_priv(to_pci_dev(dev));
+ default:
+ return NULL;
+ }
+}
+
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
+{
+ void *bus_priv;
+
+ if (!dev)
+ return cnss_get_plat_priv(NULL);
+
+ bus_priv = cnss_bus_dev_to_bus_priv(dev);
+ if (!bus_priv)
+ return NULL;
+
+ switch (cnss_get_dev_bus_type(dev)) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_priv_to_plat_priv(bus_priv);
+ default:
+ return NULL;
+ }
+}
+
+static int cnss_pm_notify(struct notifier_block *b,
+ unsigned long event, void *p)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ down_write(&cnss_pm_sem);
+ break;
+ case PM_POST_SUSPEND:
+ up_write(&cnss_pm_sem);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cnss_pm_notifier = {
+ .notifier_call = cnss_pm_notify,
+};
+
+static void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv)
+{
+ if (atomic_inc_return(&plat_priv->pm_count) != 1)
+ return;
+
+ cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n",
+ plat_priv->driver_state,
+ atomic_read(&plat_priv->pm_count));
+ pm_stay_awake(&plat_priv->plat_dev->dev);
+}
+
+static void cnss_pm_relax(struct cnss_plat_data *plat_priv)
+{
+ int r = atomic_dec_return(&plat_priv->pm_count);
+
+ WARN_ON(r < 0);
+
+ if (r != 0)
+ return;
+
+ cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n",
+ plat_priv->driver_state,
+ atomic_read(&plat_priv->pm_count));
+ pm_relax(&plat_priv->plat_dev->dev);
+}
+
+void cnss_lock_pm_sem(void)
+{
+ down_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_lock_pm_sem);
+
+void cnss_release_pm_sem(void)
+{
+ up_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_release_pm_sem);
+
+int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+ u32 target_type, u32 target_version)
+{
+ if (!pfw_files)
+ return -ENODEV;
+
+ switch (target_version) {
+ case QCA6174_REV3_VERSION:
+ case QCA6174_REV3_2_VERSION:
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
+ break;
+ default:
+ memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
+ cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X",
+ target_type, target_version);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_fw_files_for_target);
+
+int cnss_request_bus_bandwidth(int bandwidth)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+ if (!bus_bw_info->bus_client)
+ return -EINVAL;
+
+ switch (bandwidth) {
+ case CNSS_BUS_WIDTH_NONE:
+ case CNSS_BUS_WIDTH_LOW:
+ case CNSS_BUS_WIDTH_MEDIUM:
+ case CNSS_BUS_WIDTH_HIGH:
+ ret = msm_bus_scale_client_update_request(
+ bus_bw_info->bus_client, bandwidth);
+ if (!ret)
+ bus_bw_info->current_bw_vote = bandwidth;
+ else
+ cnss_pr_err("Could not set bus bandwidth: %d, err = %d\n",
+ bandwidth, ret);
+ break;
+ default:
+ cnss_pr_err("Invalid bus bandwidth: %d", bandwidth);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_request_bus_bandwidth);
+
+int cnss_get_platform_cap(struct cnss_platform_cap *cap)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (cap)
+ *cap = plat_priv->cap;
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_platform_cap);
+
+int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ ret = cnss_pci_get_bar_info(bus_priv, &info->va, &info->pa);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_soc_info);
+
+void cnss_set_driver_status(enum cnss_driver_status driver_status)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return;
+
+ plat_priv->driver_status = driver_status;
+}
+EXPORT_SYMBOL(cnss_set_driver_status);
+
+void cnss_request_pm_qos(u32 qos_val)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return;
+
+ pm_qos_add_request(&plat_priv->qos_request, PM_QOS_CPU_DMA_LATENCY,
+ qos_val);
+}
+EXPORT_SYMBOL(cnss_request_pm_qos);
+
+void cnss_remove_pm_qos(void)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return;
+
+ pm_qos_remove_request(&plat_priv->qos_request);
+}
+EXPORT_SYMBOL(cnss_remove_pm_qos);
+
+u8 *cnss_common_get_wlan_mac_address(struct device *dev, u32 *num)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct cnss_wlan_mac_info *wlan_mac_info;
+ struct cnss_wlan_mac_addr *addr;
+
+ if (!plat_priv)
+ goto out;
+
+ wlan_mac_info = &plat_priv->wlan_mac_info;
+ if (!wlan_mac_info->is_wlan_mac_set) {
+ cnss_pr_info("Platform driver doesn't have any MAC address!\n");
+ goto out;
+ }
+
+ addr = &wlan_mac_info->wlan_mac_addr;
+ *num = addr->no_of_mac_addr_set;
+
+ return &addr->mac_addr[0][0];
+out:
+ *num = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(cnss_common_get_wlan_mac_address);
+
+int cnss_wlan_enable(struct device *dev,
+ struct cnss_wlan_enable_cfg *config,
+ enum cnss_driver_mode mode,
+ const char *host_version)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct wlfw_wlan_cfg_req_msg_v01 req;
+ u32 i;
+ int ret = 0;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (qmi_bypass)
+ return 0;
+
+ if (!config || !host_version) {
+ cnss_pr_err("Invalid config or host_version pointer\n");
+ return -EINVAL;
+ }
+
+ cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n",
+ mode, config, host_version);
+
+ if (mode == CNSS_WALTEST || mode == CNSS_CCPM)
+ goto skip_cfg;
+
+ memset(&req, 0, sizeof(req));
+
+ req.host_version_valid = 1;
+ strlcpy(req.host_version, host_version,
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+
+ req.tgt_cfg_valid = 1;
+ if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+ req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ else
+ req.tgt_cfg_len = config->num_ce_tgt_cfg;
+ for (i = 0; i < req.tgt_cfg_len; i++) {
+ req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+ req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+ req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+ req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+ req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+ }
+
+ req.svc_cfg_valid = 1;
+ if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+ req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+ else
+ req.svc_cfg_len = config->num_ce_svc_pipe_cfg;
+ for (i = 0; i < req.svc_cfg_len; i++) {
+ req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+ req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+ req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+ }
+
+ req.shadow_reg_v2_valid = 1;
+ if (config->num_shadow_reg_v2_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
+ req.shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
+ else
+ req.shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;
+
+ memcpy(req.shadow_reg_v2, config->shadow_reg_v2_cfg,
+ sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
+ * req.shadow_reg_v2_len);
+
+ ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, &req);
+ if (ret)
+ goto out;
+
+skip_cfg:
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_enable);
+
+int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (qmi_bypass)
+ return 0;
+
+ return cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
+}
+EXPORT_SYMBOL(cnss_wlan_disable);
+
+#ifdef CONFIG_CNSS2_DEBUG
+int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *output)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (!output || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ cnss_pr_err("Invalid parameters for athdiag read: output %p, data_len %u\n",
+ output, data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Invalid state for athdiag read: 0x%lx\n",
+ plat_priv->driver_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type,
+ data_len, output);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_read);
+
+int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *input)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (!input || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ cnss_pr_err("Invalid parameters for athdiag write: input %p, data_len %u\n",
+ input, data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Invalid state for athdiag write: 0x%lx\n",
+ plat_priv->driver_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type,
+ data_len, input);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_write);
+#else
+int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *output)
+{
+ return -EPERM;
+}
+EXPORT_SYMBOL(cnss_athdiag_read);
+
+int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *input)
+{
+ return -EPERM;
+}
+EXPORT_SYMBOL(cnss_athdiag_write);
+#endif
+
+int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode);
+}
+EXPORT_SYMBOL(cnss_set_fw_log_mode);
+
+u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ int ret, num_vectors;
+ u32 user_base_data, base_vector;
+
+ ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
+ WAKE_MSI_NAME, &num_vectors,
+ &user_base_data, &base_vector);
+
+ if (ret) {
+ cnss_pr_err("WAKE MSI is not valid\n");
+ return 0;
+ }
+
+ return user_base_data;
+}
+
+static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+
+ ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_pci_load_m3(plat_priv->bus_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_wlfw_m3_dnld_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ return ret;
+}
+
+static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!plat_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ ret = plat_priv->driver_ops->reinit(pci_priv->pci_dev,
+ pci_priv->pci_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to reinit host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ } else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+ ret = plat_priv->driver_ops->probe(pci_priv->pci_dev,
+ pci_priv->pci_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to probe host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!plat_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
+ } else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ plat_priv->driver_ops->remove(pci_priv->pci_dev);
+ clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return 0;
+}
+
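+/*
+ * Handle the FW-ready indication: stop the boot timer and, depending on the
+ * current driver state, enter WALTEST mode, run cold boot calibration, call
+ * the host driver probe/reinit, or complete a pending synchronous power-up.
+ * Any failure powers the device back down.
+ */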
+static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ del_timer(&plat_priv->fw_boot_timer);
+ set_bit(CNSS_FW_READY, &plat_priv->driver_state);
+
+ if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
+ clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ }
+
+ if (enable_waltest) {
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+ QMI_WLFW_WALTEST_V01);
+ } else if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+ QMI_WLFW_CALIBRATION_V01);
+ } else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ ret = cnss_driver_call_probe(plat_priv);
+ } else {
+ complete(&plat_priv->power_up_complete);
+ }
+
+ if (ret)
+ goto shutdown;
+
+ return 0;
+
+shutdown:
+ cnss_pci_stop_mhi(plat_priv->bus_priv);
+ cnss_suspend_pci_link(plat_priv->bus_priv);
+ cnss_power_off_device(plat_priv);
+
+ return ret;
+}
+
+static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
+{
+ switch (type) {
+ case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+ return "SERVER_ARRIVE";
+ case CNSS_DRIVER_EVENT_SERVER_EXIT:
+ return "SERVER_EXIT";
+ case CNSS_DRIVER_EVENT_REQUEST_MEM:
+ return "REQUEST_MEM";
+ case CNSS_DRIVER_EVENT_FW_MEM_READY:
+ return "FW_MEM_READY";
+ case CNSS_DRIVER_EVENT_FW_READY:
+ return "FW_READY";
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+ return "COLD_BOOT_CAL_START";
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+ return "COLD_BOOT_CAL_DONE";
+ case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+ return "REGISTER_DRIVER";
+ case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+ return "UNREGISTER_DRIVER";
+ case CNSS_DRIVER_EVENT_RECOVERY:
+ return "RECOVERY";
+ case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+ return "FORCE_FW_ASSERT";
+ case CNSS_DRIVER_EVENT_POWER_UP:
+ return "POWER_UP";
+ case CNSS_DRIVER_EVENT_POWER_DOWN:
+ return "POWER_DOWN";
+ case CNSS_DRIVER_EVENT_MAX:
+ return "EVENT_MAX";
+ }
+
+ return "UNKNOWN";
+}
+
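+/*
+ * Queue a driver event for the event worker. Synchronous posts block until
+ * the worker has processed the event and return its result; if the wait is
+ * interrupted while the event is still pending, the event is downgraded to
+ * asynchronous (the worker will free it) and -EINTR is returned.
+ */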
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_event_type type,
+ bool sync, void *data)
+{
+ struct cnss_driver_event *event;
+ unsigned long flags;
+ gfp_t gfp = GFP_KERNEL;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx\n",
+ cnss_driver_event_to_str(type), type,
+ sync ? "-sync" : "", plat_priv->driver_state);
+
+ if (type >= CNSS_DRIVER_EVENT_MAX) {
+ cnss_pr_err("Invalid Event type: %d, can't post", type);
+ return -EINVAL;
+ }
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ event = kzalloc(sizeof(*event), gfp);
+ if (!event)
+ return -ENOMEM;
+
+ cnss_pm_stay_awake(plat_priv);
+
+ event->type = type;
+ event->data = data;
+ init_completion(&event->complete);
+ event->ret = CNSS_EVENT_PENDING;
+ event->sync = sync;
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ list_add_tail(&event->list, &plat_priv->event_list);
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ queue_work(plat_priv->event_wq, &plat_priv->event_work);
+
+ if (!sync)
+ goto out;
+
+ ret = wait_for_completion_interruptible(&event->complete);
+
+ cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+ cnss_driver_event_to_str(type), type,
+ plat_priv->driver_state, ret, event->ret);
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
+ event->sync = false;
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ ret = event->ret;
+ kfree(event);
+
+out:
+ cnss_pm_relax(plat_priv);
+ return ret;
+}
+
+int cnss_power_up(struct device *dev)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ unsigned int timeout;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ cnss_pr_dbg("Powering up device\n");
+
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ true, NULL);
+ if (ret)
+ goto out;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ goto out;
+
+ timeout = cnss_get_qmi_timeout();
+
+ reinit_completion(&plat_priv->power_up_complete);
+ ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
+ msecs_to_jiffies(timeout) << 2);
+ if (!ret) {
+ cnss_pr_err("Timeout waiting for power up to complete\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_power_up);
+
+int cnss_power_down(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ cnss_pr_dbg("Powering down device\n");
+
+ return cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
+ true, NULL);
+}
+EXPORT_SYMBOL(cnss_power_down);
+
+int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->driver_ops) {
+ cnss_pr_err("Driver has already registered!\n");
+ return -EEXIST;
+ }
+
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ true, driver_ops);
+ return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_register_driver);
+
+void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
+{
+ struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ true, NULL);
+}
+EXPORT_SYMBOL(cnss_wlan_unregister_driver);
+
+static int cnss_get_resources(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = cnss_get_vreg(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get vreg, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_get_pinctrl(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static void cnss_put_resources(struct cnss_plat_data *plat_priv)
+{
+}
+
+static int cnss_modem_notifier_nb(struct notifier_block *nb,
+ unsigned long code,
+ void *ss_handle)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(nb, struct cnss_plat_data, modem_nb);
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ struct cnss_esoc_info *esoc_info;
+ struct cnss_wlan_driver *driver_ops;
+
+ cnss_pr_dbg("Modem notifier: event %lu\n", code);
+
+ if (!pci_priv)
+ return NOTIFY_DONE;
+
+ esoc_info = &plat_priv->esoc_info;
+
+ if (code == SUBSYS_AFTER_POWERUP)
+ esoc_info->modem_current_status = 1;
+ else if (code == SUBSYS_BEFORE_SHUTDOWN)
+ esoc_info->modem_current_status = 0;
+ else
+ return NOTIFY_DONE;
+
+ driver_ops = plat_priv->driver_ops;
+ if (!driver_ops || !driver_ops->modem_status)
+ return NOTIFY_DONE;
+
+ driver_ops->modem_status(pci_priv->pci_dev,
+ esoc_info->modem_current_status);
+
+ return NOTIFY_OK;
+}
+
+static int cnss_register_esoc(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_esoc_info *esoc_info;
+ struct esoc_desc *esoc_desc;
+ const char *client_desc;
+
+ dev = &plat_priv->plat_dev->dev;
+ esoc_info = &plat_priv->esoc_info;
+
+ esoc_info->notify_modem_status =
+ of_property_read_bool(dev->of_node,
+ "qcom,notify-modem-status");
+
+ if (esoc_info->notify_modem_status)
+ goto out;
+
+ ret = of_property_read_string_index(dev->of_node, "esoc-names", 0,
+ &client_desc);
+ if (ret) {
+ cnss_pr_dbg("esoc-names is not defined in DT, skip!\n");
+ } else {
+ esoc_desc = devm_register_esoc_client(dev, client_desc);
+ if (IS_ERR_OR_NULL(esoc_desc)) {
+ ret = PTR_RET(esoc_desc);
+ cnss_pr_err("Failed to register esoc_desc, err = %d\n",
+ ret);
+ goto out;
+ }
+ esoc_info->esoc_desc = esoc_desc;
+ }
+
+ plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb;
+ esoc_info->modem_current_status = 0;
+ esoc_info->modem_notify_handler =
+ subsys_notif_register_notifier(esoc_info->esoc_desc ?
+ esoc_info->esoc_desc->name :
+ "modem", &plat_priv->modem_nb);
+ if (IS_ERR(esoc_info->modem_notify_handler)) {
+ ret = PTR_ERR(esoc_info->modem_notify_handler);
+ cnss_pr_err("Failed to register esoc notifier, err = %d\n",
+ ret);
+ goto unreg_esoc;
+ }
+
+ return 0;
+unreg_esoc:
+ if (esoc_info->esoc_desc)
+ devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+out:
+ return ret;
+}
+
+static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct cnss_esoc_info *esoc_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ esoc_info = &plat_priv->esoc_info;
+
+ if (esoc_info->notify_modem_status)
+ subsys_notif_unregister_notifier(esoc_info->
+ modem_notify_handler,
+ &plat_priv->modem_nb);
+ if (esoc_info->esoc_desc)
+ devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+}
+
+static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL!\n");
+ return -EINVAL;
+ }
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to power on device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_resume_pci_link(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = cnss_driver_call_probe(plat_priv);
+ if (ret)
+ goto suspend_link;
+
+ return 0;
+suspend_link:
+ cnss_suspend_pci_link(pci_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+out:
+ return ret;
+}
+
+static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (!plat_priv->driver_ops)
+ return -EINVAL;
+
+ cnss_driver_call_remove(plat_priv);
+
+ cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+ cnss_power_off_device(plat_priv);
+
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+ return ret;
+}
+
+static void cnss_qca6174_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!plat_priv->driver_ops)
+ return;
+
+ plat_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
+}
+
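+/*
+ * QCA6290 power-up: power on the device, resume the PCIe link and start MHI.
+ * If MHI fails to start, the FW boot timer is typically armed (at half the
+ * QMI timeout) so recovery kicks in later instead of failing here. Otherwise,
+ * unless QMI is bypassed, the timer is armed at twice the QMI timeout to wait
+ * for the FW-ready indication.
+ */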
+static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ unsigned int timeout;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->ramdump_info_v2.dump_data_valid ||
+ test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+ cnss_pci_clear_dump_info(pci_priv);
+ }
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to power on device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_resume_pci_link(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ goto power_off;
+ }
+
+ timeout = cnss_get_qmi_timeout();
+
+ ret = cnss_pci_start_mhi(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+ !pci_priv->pci_link_down_ind && timeout)
+ mod_timer(&plat_priv->fw_boot_timer,
+ jiffies + msecs_to_jiffies(timeout >> 1));
+ return 0;
+ }
+
+ cnss_set_pin_connect_status(plat_priv);
+
+ if (qmi_bypass) {
+ ret = cnss_driver_call_probe(plat_priv);
+ if (ret)
+ goto stop_mhi;
+ } else if (timeout) {
+ mod_timer(&plat_priv->fw_boot_timer,
+ jiffies + msecs_to_jiffies(timeout << 1));
+ }
+
+ return 0;
+
+stop_mhi:
+ cnss_pci_stop_mhi(pci_priv);
+ cnss_suspend_pci_link(pci_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+out:
+ return ret;
+}
+
+static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+ test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state))
+ goto skip_driver_remove;
+
+ if (!plat_priv->driver_ops)
+ return -EINVAL;
+
+ cnss_driver_call_remove(plat_priv);
+
+skip_driver_remove:
+ cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+
+ cnss_pci_stop_mhi(pci_priv);
+
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+ cnss_power_off_device(plat_priv);
+
+ clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+ clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+ return ret;
+}
+
+static void cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ int ret = 0;
+
+ cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
+ plat_priv->driver_state);
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state))
+ return;
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_KERNEL_PANIC);
+ if (ret) {
+ cnss_pr_err("Fail to complete RDDM, err = %d\n", ret);
+ return;
+ }
+
+ cnss_pci_collect_dump_info(pci_priv);
+}
+
+static int cnss_powerup(const struct subsys_desc *subsys_desc)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_state) {
+ cnss_pr_dbg("Powerup is ignored.\n");
+ return 0;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_powerup(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_powerup(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int cnss_shutdown(const struct subsys_desc *subsys_desc, bool force_stop)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_shutdown(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_shutdown(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int cnss_qca6290_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+ struct cnss_dump_data *dump_data = &info_v2->dump_data;
+ struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+ struct ramdump_segment *ramdump_segs, *s;
+ int i, ret = 0;
+
+ if (!info_v2->dump_data_valid ||
+ dump_data->nentries == 0)
+ return 0;
+
+ ramdump_segs = kcalloc(dump_data->nentries,
+ sizeof(*ramdump_segs),
+ GFP_KERNEL);
+ if (!ramdump_segs)
+ return -ENOMEM;
+
+ s = ramdump_segs;
+ for (i = 0; i < dump_data->nentries; i++) {
+ s->address = dump_seg->address;
+ s->v_address = dump_seg->v_address;
+ s->size = dump_seg->size;
+ s++;
+ dump_seg++;
+ }
+
+ ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
+ dump_data->nentries);
+ kfree(ramdump_segs);
+
+ cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
+ cnss_pci_clear_dump_info(plat_priv->bus_priv);
+
+ return ret;
+}
+
+static int cnss_qca6174_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_ramdump_info *ramdump_info;
+ struct ramdump_segment segment;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ if (!ramdump_info->ramdump_size)
+ return -EINVAL;
+
+ memset(&segment, 0, sizeof(segment));
+ segment.v_address = ramdump_info->ramdump_va;
+ segment.size = ramdump_info->ramdump_size;
+ ret = do_ramdump(ramdump_info->ramdump_dev, &segment, 1);
+
+ return ret;
+}
+
+static int cnss_ramdump(int enable, const struct subsys_desc *subsys_desc)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (!enable)
+ return 0;
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_ramdump(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_ramdump(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+void *cnss_get_virt_ramdump_mem(unsigned long *size)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_ramdump_info *ramdump_info;
+
+ if (!plat_priv)
+ return NULL;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ *size = ramdump_info->ramdump_size;
+
+ return ramdump_info->ramdump_va;
+}
+EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
+
+void cnss_device_crashed(void)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_subsys_info *subsys_info;
+
+ if (!plat_priv)
+ return;
+
+ subsys_info = &plat_priv->subsys_info;
+ if (subsys_info->subsys_device) {
+ set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ subsys_set_crash_status(subsys_info->subsys_device, true);
+ subsystem_restart_dev(subsys_info->subsys_device);
+ }
+}
+EXPORT_SYMBOL(cnss_device_crashed);
+
+static void cnss_crash_shutdown(const struct subsys_desc *subsys_desc)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ cnss_qca6174_crash_shutdown(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ cnss_qca6290_crash_shutdown(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ }
+}
+
+static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
+{
+ switch (reason) {
+ case CNSS_REASON_DEFAULT:
+ return "DEFAULT";
+ case CNSS_REASON_LINK_DOWN:
+ return "LINK_DOWN";
+ case CNSS_REASON_RDDM:
+ return "RDDM";
+ case CNSS_REASON_TIMEOUT:
+ return "TIMEOUT";
+ }
+
+ return "UNKNOWN";
+}
+
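+/*
+ * Carry out recovery: QCA6174 (and link-down with the self-recovery quirk)
+ * is handled with a direct shutdown/power-up cycle, while newer targets
+ * collect an RDDM dump when requested and then hand the device over to the
+ * subsystem restart framework.
+ */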
+static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
+ enum cnss_recovery_reason reason)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ struct cnss_subsys_info *subsys_info =
+ &plat_priv->subsys_info;
+ int ret = 0;
+
+ plat_priv->recovery_count++;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ goto self_recovery;
+
+ if (plat_priv->driver_ops &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state))
+ plat_priv->driver_ops->update_status(pci_priv->pci_dev,
+ CNSS_RECOVERY);
+
+ switch (reason) {
+ case CNSS_REASON_LINK_DOWN:
+ if (test_bit(LINK_DOWN_SELF_RECOVERY, &quirks))
+ goto self_recovery;
+ break;
+ case CNSS_REASON_RDDM:
+ clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM);
+ if (ret) {
+ cnss_pr_err("Failed to complete RDDM, err = %d\n", ret);
+ break;
+ }
+ cnss_pci_collect_dump_info(pci_priv);
+ break;
+ case CNSS_REASON_DEFAULT:
+ case CNSS_REASON_TIMEOUT:
+ break;
+ default:
+ cnss_pr_err("Unsupported recovery reason: %s(%d)\n",
+ cnss_recovery_reason_to_str(reason), reason);
+ break;
+ }
+
+ if (!subsys_info->subsys_device)
+ return 0;
+
+ subsys_set_crash_status(subsys_info->subsys_device, true);
+ subsystem_restart_dev(subsys_info->subsys_device);
+
+ return 0;
+
+self_recovery:
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+ cnss_powerup(&subsys_info->subsys_desc);
+
+ return 0;
+}
+
+static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ struct cnss_recovery_data *recovery_data = data;
+ int ret = 0;
+
+ cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n",
+ cnss_recovery_reason_to_str(recovery_data->reason),
+ recovery_data->reason);
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_err("Recovery is already in progress!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ cnss_pr_err("Driver unload is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+ cnss_pr_err("Driver load is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ set_bit(CNSS_FW_BOOT_RECOVERY,
+ &plat_priv->driver_state);
+ } else if (test_bit(CNSS_DRIVER_LOADING,
+ &plat_priv->driver_state)) {
+ cnss_pr_err("Driver probe is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ }
+
+ set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ ret = cnss_do_recovery(plat_priv, recovery_data->reason);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int cnss_self_recovery(struct device *dev,
+ enum cnss_recovery_reason reason)
+{
+ cnss_schedule_recovery(dev, reason);
+ return 0;
+}
+EXPORT_SYMBOL(cnss_self_recovery);
+
+void cnss_schedule_recovery(struct device *dev,
+ enum cnss_recovery_reason reason)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct cnss_recovery_data *data;
+ gfp_t gfp = GFP_KERNEL;
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ data = kzalloc(sizeof(*data), gfp);
+ if (!data)
+ return;
+
+ data->reason = reason;
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_RECOVERY,
+ false, data);
+}
+EXPORT_SYMBOL(cnss_schedule_recovery);
+
+static int cnss_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ int ret;
+
+ ret = cnss_pci_set_mhi_state(plat_priv->bus_priv,
+ CNSS_MHI_TRIGGER_RDDM);
+ if (ret) {
+ cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ CNSS_REASON_DEFAULT);
+ return 0;
+ }
+
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+ mod_timer(&plat_priv->fw_boot_timer,
+ jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
+ }
+
+ return 0;
+}
+
+int cnss_force_fw_assert(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+ cnss_pr_info("Forced FW assert is not supported\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
+ return 0;
+ }
+
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ false, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_force_fw_assert);
+
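+/*
+ * FW boot timer callback: firmware did not signal ready in time, so
+ * schedule recovery with CNSS_REASON_TIMEOUT.
+ */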
+void fw_boot_timeout(unsigned long data)
+{
+ struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ cnss_pr_err("Timeout waiting for FW ready indication!\n");
+
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ CNSS_REASON_TIMEOUT);
+}
+
+static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ plat_priv->driver_ops = data;
+
+ ret = cnss_powerup(&subsys_info->subsys_desc);
+ if (ret) {
+ clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ plat_priv->driver_ops = NULL;
+ }
+
+ return ret;
+}
+
+static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+ plat_priv->driver_ops = NULL;
+
+ return 0;
+}
+
+static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+ ret = cnss_powerup(&subsys_info->subsys_desc);
+ if (ret)
+ clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+
+ return ret;
+}
+
+static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+ clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+
+ return 0;
+}
+
+static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ return cnss_powerup(&subsys_info->subsys_desc);
+}
+
+static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+
+ return 0;
+}
+
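+/*
+ * Event worker: drain plat_priv->event_list under event_lock, dispatching
+ * each event to its handler with the lock dropped. Synchronous events are
+ * completed (and freed by the poster); asynchronous ones are freed here.
+ */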
+static void cnss_driver_event_work(struct work_struct *work)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(work, struct cnss_plat_data, event_work);
+ struct cnss_driver_event *event;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ cnss_pm_stay_awake(plat_priv);
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+
+ while (!list_empty(&plat_priv->event_list)) {
+ event = list_first_entry(&plat_priv->event_list,
+ struct cnss_driver_event, list);
+ list_del(&event->list);
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n",
+ cnss_driver_event_to_str(event->type),
+ event->sync ? "-sync" : "", event->type,
+ plat_priv->driver_state);
+
+ switch (event->type) {
+ case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+ ret = cnss_wlfw_server_arrive(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_SERVER_EXIT:
+ ret = cnss_wlfw_server_exit(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_REQUEST_MEM:
+ ret = cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+ if (ret)
+ break;
+ ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_FW_MEM_READY:
+ ret = cnss_fw_mem_ready_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_FW_READY:
+ ret = cnss_fw_ready_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+ ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+ ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+ ret = cnss_register_driver_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+ ret = cnss_unregister_driver_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_RECOVERY:
+ ret = cnss_driver_recovery_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+ ret = cnss_force_fw_assert_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_POWER_UP:
+ ret = cnss_power_up_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_POWER_DOWN:
+ ret = cnss_power_down_hdlr(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Invalid driver event type: %d",
+ event->type);
+ kfree(event);
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ continue;
+ }
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ if (event->sync) {
+ event->ret = ret;
+ complete(&event->complete);
+ continue;
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ kfree(event);
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ cnss_pm_relax(plat_priv);
+}
+
+int cnss_register_subsys(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info;
+
+ subsys_info = &plat_priv->subsys_info;
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ subsys_info->subsys_desc.name = "AR6320";
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ subsys_info->subsys_desc.name = "QCA6290";
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ subsys_info->subsys_desc.owner = THIS_MODULE;
+ subsys_info->subsys_desc.powerup = cnss_powerup;
+ subsys_info->subsys_desc.shutdown = cnss_shutdown;
+ subsys_info->subsys_desc.ramdump = cnss_ramdump;
+ subsys_info->subsys_desc.crash_shutdown = cnss_crash_shutdown;
+ subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
+
+ subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
+ if (IS_ERR(subsys_info->subsys_device)) {
+ ret = PTR_ERR(subsys_info->subsys_device);
+ cnss_pr_err("Failed to register subsys, err = %d\n", ret);
+ goto out;
+ }
+
+ subsys_info->subsys_handle =
+ subsystem_get(subsys_info->subsys_desc.name);
+ if (!subsys_info->subsys_handle) {
+ cnss_pr_err("Failed to get subsys_handle!\n");
+ ret = -EINVAL;
+ goto unregister_subsys;
+ } else if (IS_ERR(subsys_info->subsys_handle)) {
+ ret = PTR_ERR(subsys_info->subsys_handle);
+ cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret);
+ goto unregister_subsys;
+ }
+
+ return 0;
+
+unregister_subsys:
+ subsys_unregister(subsys_info->subsys_device);
+out:
+ return ret;
+}
+
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info;
+
+ subsys_info = &plat_priv->subsys_info;
+ subsystem_put(subsys_info->subsys_handle);
+ subsys_unregister(subsys_info->subsys_device);
+}
+
+static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info *ramdump_info;
+ struct msm_dump_entry dump_entry;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ ramdump_info->dump_data.addr = ramdump_info->ramdump_pa;
+ ramdump_info->dump_data.len = ramdump_info->ramdump_size;
+ ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER;
+ ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
+ strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME,
+ sizeof(ramdump_info->dump_data.name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
+
+ return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+}
+
+static int cnss_qca6174_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_subsys_info *subsys_info;
+ struct cnss_ramdump_info *ramdump_info;
+ u32 ramdump_size = 0;
+
+ dev = &plat_priv->plat_dev->dev;
+ subsys_info = &plat_priv->subsys_info;
+ ramdump_info = &plat_priv->ramdump_info;
+
+ if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+ &ramdump_size) == 0) {
+ ramdump_info->ramdump_va = dma_alloc_coherent(dev, ramdump_size,
+ &ramdump_info->ramdump_pa, GFP_KERNEL);
+
+ if (ramdump_info->ramdump_va)
+ ramdump_info->ramdump_size = ramdump_size;
+ }
+
+ cnss_pr_dbg("ramdump va: %pK, pa: %pa\n",
+ ramdump_info->ramdump_va, &ramdump_info->ramdump_pa);
+
+ if (ramdump_info->ramdump_size == 0) {
+ cnss_pr_info("Ramdump will not be collected");
+ goto out;
+ }
+
+ ret = cnss_init_dump_entry(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+ goto free_ramdump;
+ }
+
+ ramdump_info->ramdump_dev = create_ramdump_device(
+ subsys_info->subsys_desc.name, subsys_info->subsys_desc.dev);
+ if (!ramdump_info->ramdump_dev) {
+ cnss_pr_err("Failed to create ramdump device!");
+ ret = -ENOMEM;
+ goto free_ramdump;
+ }
+
+ return 0;
+free_ramdump:
+ dma_free_coherent(dev, ramdump_info->ramdump_size,
+ ramdump_info->ramdump_va, ramdump_info->ramdump_pa);
+out:
+ return ret;
+}
+
+static void cnss_qca6174_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct cnss_ramdump_info *ramdump_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ ramdump_info = &plat_priv->ramdump_info;
+
+ if (ramdump_info->ramdump_dev)
+ destroy_ramdump_device(ramdump_info->ramdump_dev);
+
+ if (ramdump_info->ramdump_va)
+ dma_free_coherent(dev, ramdump_info->ramdump_size,
+ ramdump_info->ramdump_va,
+ ramdump_info->ramdump_pa);
+}
+
+static int cnss_qca6290_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info;
+ struct cnss_ramdump_info_v2 *info_v2;
+ struct cnss_dump_data *dump_data;
+ struct msm_dump_entry dump_entry;
+ struct device *dev = &plat_priv->plat_dev->dev;
+ u32 ramdump_size = 0;
+
+ subsys_info = &plat_priv->subsys_info;
+ info_v2 = &plat_priv->ramdump_info_v2;
+ dump_data = &info_v2->dump_data;
+
+ if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+ &ramdump_size) == 0)
+ info_v2->ramdump_size = ramdump_size;
+
+ cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
+
+ info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
+ if (!info_v2->dump_data_vaddr)
+ return -ENOMEM;
+
+ dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
+ dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
+ dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
+ dump_data->seg_version = CNSS_DUMP_SEG_VER;
+ strlcpy(dump_data->name, CNSS_DUMP_NAME,
+ sizeof(dump_data->name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(dump_data);
+
+ ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+ if (ret) {
+ cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+ goto free_ramdump;
+ }
+
+ info_v2->ramdump_dev =
+ create_ramdump_device(subsys_info->subsys_desc.name,
+ subsys_info->subsys_desc.dev);
+ if (!info_v2->ramdump_dev) {
+ cnss_pr_err("Failed to create ramdump device!\n");
+ ret = -ENOMEM;
+ goto free_ramdump;
+ }
+
+ return 0;
+
+free_ramdump:
+ kfree(info_v2->dump_data_vaddr);
+ info_v2->dump_data_vaddr = NULL;
+ return ret;
+}
+
+static void cnss_qca6290_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info_v2 *info_v2;
+
+ info_v2 = &plat_priv->ramdump_info_v2;
+
+ if (info_v2->ramdump_dev)
+ destroy_ramdump_device(info_v2->ramdump_dev);
+
+ kfree(info_v2->dump_data_vaddr);
+ info_v2->dump_data_vaddr = NULL;
+ info_v2->dump_data_valid = false;
+}
+
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_register_ramdump(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_register_ramdump(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ cnss_qca6174_unregister_ramdump(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ cnss_qca6290_unregister_ramdump(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ break;
+ }
+}
+
+static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+
+ bus_bw_info->bus_scale_table =
+ msm_bus_cl_get_pdata(plat_priv->plat_dev);
+ if (bus_bw_info->bus_scale_table) {
+ bus_bw_info->bus_client =
+ msm_bus_scale_register_client(
+ bus_bw_info->bus_scale_table);
+ if (!bus_bw_info->bus_client) {
+ cnss_pr_err("Failed to register bus scale client!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+
+ if (bus_bw_info->bus_client)
+ msm_bus_scale_unregister_client(bus_bw_info->bus_client);
+}
+
+static ssize_t cnss_fs_ready_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int fs_ready = 0;
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+ if (sscanf(buf, "%du", &fs_ready) != 1)
+ return -EINVAL;
+
+ cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n",
+ fs_ready, count);
+
+ if (qmi_bypass) {
+ cnss_pr_dbg("QMI is bypassed.\n");
+ return count;
+ }
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return count;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ break;
+ default:
+ cnss_pr_err("Not supported for device ID 0x%lx\n",
+ plat_priv->device_id);
+ return count;
+ }
+
+ if (fs_ready == FILE_SYSTEM_READY) {
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+ true, NULL);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(fs_ready, 0220, NULL, cnss_fs_ready_store);
+
+static int cnss_create_sysfs(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = device_create_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready);
+ if (ret) {
+ cnss_pr_err("Failed to create device file, err = %d\n", ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
+{
+ device_remove_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready);
+}
+
+static int cnss_event_work_init(struct cnss_plat_data *plat_priv)
+{
+ spin_lock_init(&plat_priv->event_lock);
+ plat_priv->event_wq = alloc_workqueue("cnss_driver_event",
+ WQ_UNBOUND, 1);
+ if (!plat_priv->event_wq) {
+ cnss_pr_err("Failed to create event workqueue!\n");
+ return -EFAULT;
+ }
+
+ INIT_WORK(&plat_priv->event_work, cnss_driver_event_work);
+ INIT_LIST_HEAD(&plat_priv->event_list);
+
+ return 0;
+}
+
+static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
+{
+ destroy_workqueue(plat_priv->event_wq);
+}
+
+static const struct platform_device_id cnss_platform_id_table[] = {
+ { .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
+ { .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
+};
+
+static const struct of_device_id cnss_of_match_table[] = {
+ {
+ .compatible = "qcom,cnss",
+ .data = (void *)&cnss_platform_id_table[0]},
+ {
+ .compatible = "qcom,cnss-qca6290",
+ .data = (void *)&cnss_platform_id_table[1]},
+ { },
+};
+MODULE_DEVICE_TABLE(of, cnss_of_match_table);
+
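+/*
+ * Platform probe: allocate the driver context, then bring up resources in
+ * order (regulators/pinctrl, device power, PCI, ESOC, bus scaling, sysfs,
+ * event worker, QMI, debugfs) and finally arm the FW boot timer and PM
+ * notifier. The goto labels unwind in reverse order on failure.
+ */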
+static int cnss_probe(struct platform_device *plat_dev)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+ const struct of_device_id *of_id;
+ const struct platform_device_id *device_id;
+
+ if (cnss_get_plat_priv(plat_dev)) {
+ cnss_pr_err("Driver is already initialized!\n");
+ ret = -EEXIST;
+ goto out;
+ }
+
+ of_id = of_match_device(cnss_of_match_table, &plat_dev->dev);
+ if (!of_id || !of_id->data) {
+ cnss_pr_err("Failed to find of match device!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ device_id = of_id->data;
+
+ plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv),
+ GFP_KERNEL);
+ if (!plat_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ plat_priv->plat_dev = plat_dev;
+ plat_priv->device_id = device_id->driver_data;
+ cnss_set_plat_priv(plat_dev, plat_priv);
+ platform_set_drvdata(plat_dev, plat_priv);
+
+ ret = cnss_get_resources(plat_priv);
+ if (ret)
+ goto reset_ctx;
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret)
+ goto free_res;
+
+ ret = cnss_pci_init(plat_priv);
+ if (ret)
+ goto power_off;
+
+ ret = cnss_register_esoc(plat_priv);
+ if (ret)
+ goto deinit_pci;
+
+ ret = cnss_register_bus_scale(plat_priv);
+ if (ret)
+ goto unreg_esoc;
+
+ ret = cnss_create_sysfs(plat_priv);
+ if (ret)
+ goto unreg_bus_scale;
+
+ ret = cnss_event_work_init(plat_priv);
+ if (ret)
+ goto remove_sysfs;
+
+ ret = cnss_qmi_init(plat_priv);
+ if (ret)
+ goto deinit_event_work;
+
+ ret = cnss_debugfs_create(plat_priv);
+ if (ret)
+ goto deinit_qmi;
+
+ setup_timer(&plat_priv->fw_boot_timer,
+ fw_boot_timeout, (unsigned long)plat_priv);
+
+ register_pm_notifier(&cnss_pm_notifier);
+
+ ret = device_init_wakeup(&plat_dev->dev, true);
+ if (ret)
+ cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+ ret);
+
+ init_completion(&plat_priv->power_up_complete);
+
+ cnss_pr_info("Platform driver probed successfully.\n");
+
+ return 0;
+
+deinit_qmi:
+ cnss_qmi_deinit(plat_priv);
+deinit_event_work:
+ cnss_event_work_deinit(plat_priv);
+remove_sysfs:
+ cnss_remove_sysfs(plat_priv);
+unreg_bus_scale:
+ cnss_unregister_bus_scale(plat_priv);
+unreg_esoc:
+ cnss_unregister_esoc(plat_priv);
+deinit_pci:
+ cnss_pci_deinit(plat_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+free_res:
+ cnss_put_resources(plat_priv);
+reset_ctx:
+ platform_set_drvdata(plat_dev, NULL);
+ cnss_set_plat_priv(plat_dev, NULL);
+out:
+ return ret;
+}
+
+static int cnss_remove(struct platform_device *plat_dev)
+{
+ struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
+
+ complete_all(&plat_priv->power_up_complete);
+ device_init_wakeup(&plat_dev->dev, false);
+ unregister_pm_notifier(&cnss_pm_notifier);
+ del_timer(&plat_priv->fw_boot_timer);
+ cnss_debugfs_destroy(plat_priv);
+ cnss_qmi_deinit(plat_priv);
+ cnss_event_work_deinit(plat_priv);
+ cnss_remove_sysfs(plat_priv);
+ cnss_unregister_bus_scale(plat_priv);
+ cnss_unregister_esoc(plat_priv);
+ cnss_pci_deinit(plat_priv);
+ cnss_put_resources(plat_priv);
+ platform_set_drvdata(plat_dev, NULL);
+ plat_env = NULL;
+
+ return 0;
+}
+
+static struct platform_driver cnss_platform_driver = {
+ .probe = cnss_probe,
+ .remove = cnss_remove,
+ .driver = {
+ .name = "cnss2",
+ .owner = THIS_MODULE,
+ .of_match_table = cnss_of_match_table,
+ },
+};
+
+static int __init cnss_initialize(void)
+{
+ int ret = 0;
+
+ cnss_debug_init();
+ ret = platform_driver_register(&cnss_platform_driver);
+ if (ret)
+ cnss_debug_deinit();
+
+ return ret;
+}
+
+static void __exit cnss_exit(void)
+{
+ platform_driver_unregister(&cnss_platform_driver);
+ cnss_debug_deinit();
+}
+
+module_init(cnss_initialize);
+module_exit(cnss_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS2 Platform Driver");
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
new file mode 100644
index 000000000000..a2d9a02bde20
--- /dev/null
+++ b/drivers/net/wireless/cnss2/main.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_MAIN_H
+#define _CNSS_MAIN_H
+
+#include <linux/esoc_client.h>
+#include <linux/etherdevice.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_qos.h>
+#include <net/cnss2.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "qmi.h"
+
+#define MAX_NO_OF_MAC_ADDR 4
+
+enum cnss_dev_bus_type {
+ CNSS_BUS_NONE = -1,
+ CNSS_BUS_PCI,
+};
+
+struct cnss_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_uv;
+ u32 max_uv;
+ u32 load_ua;
+ u32 delay_us;
+};
+
+struct cnss_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *bootstrap_active;
+ struct pinctrl_state *wlan_en_active;
+ struct pinctrl_state *wlan_en_sleep;
+};
+
+struct cnss_subsys_info {
+ struct subsys_device *subsys_device;
+ struct subsys_desc subsys_desc;
+ void *subsys_handle;
+};
+
+struct cnss_ramdump_info {
+ struct ramdump_device *ramdump_dev;
+ unsigned long ramdump_size;
+ void *ramdump_va;
+ phys_addr_t ramdump_pa;
+ struct msm_dump_data dump_data;
+};
+
+struct cnss_dump_seg {
+ unsigned long address;
+ void *v_address;
+ unsigned long size;
+ u32 type;
+};
+
+struct cnss_dump_data {
+ u32 version;
+ u32 magic;
+ char name[32];
+ phys_addr_t paddr;
+ int nentries;
+ u32 seg_version;
+};
+
+struct cnss_ramdump_info_v2 {
+ struct ramdump_device *ramdump_dev;
+ unsigned long ramdump_size;
+ void *dump_data_vaddr;
+ bool dump_data_valid;
+ struct cnss_dump_data dump_data;
+};
+
+struct cnss_esoc_info {
+ struct esoc_desc *esoc_desc;
+ bool notify_modem_status;
+ void *modem_notify_handler;
+ int modem_current_status;
+};
+
+struct cnss_bus_bw_info {
+ struct msm_bus_scale_pdata *bus_scale_table;
+ u32 bus_client;
+ int current_bw_vote;
+};
+
+struct cnss_wlan_mac_addr {
+ u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+ u32 no_of_mac_addr_set;
+};
+
+struct cnss_wlan_mac_info {
+ struct cnss_wlan_mac_addr wlan_mac_addr;
+ bool is_wlan_mac_set;
+};
+
+struct cnss_fw_mem {
+ size_t size;
+ void *va;
+ phys_addr_t pa;
+ bool valid;
+};
+
+enum cnss_driver_event_type {
+ CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+ CNSS_DRIVER_EVENT_SERVER_EXIT,
+ CNSS_DRIVER_EVENT_REQUEST_MEM,
+ CNSS_DRIVER_EVENT_FW_MEM_READY,
+ CNSS_DRIVER_EVENT_FW_READY,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ CNSS_DRIVER_EVENT_RECOVERY,
+ CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
+ CNSS_DRIVER_EVENT_MAX,
+};
+
+enum cnss_driver_state {
+ CNSS_QMI_WLFW_CONNECTED,
+ CNSS_FW_MEM_READY,
+ CNSS_FW_READY,
+ CNSS_COLD_BOOT_CAL,
+ CNSS_DRIVER_LOADING,
+ CNSS_DRIVER_UNLOADING,
+ CNSS_DRIVER_PROBED,
+ CNSS_DRIVER_RECOVERY,
+ CNSS_FW_BOOT_RECOVERY,
+ CNSS_DEV_ERR_NOTIFY,
+};
+
+struct cnss_recovery_data {
+ enum cnss_recovery_reason reason;
+};
+
+enum cnss_pins {
+ CNSS_WLAN_EN,
+ CNSS_PCIE_TXP,
+ CNSS_PCIE_TXN,
+ CNSS_PCIE_RXP,
+ CNSS_PCIE_RXN,
+ CNSS_PCIE_REFCLKP,
+ CNSS_PCIE_REFCLKN,
+ CNSS_PCIE_RST,
+ CNSS_PCIE_WAKE,
+};
+
+struct cnss_pin_connect_result {
+ u32 fw_pwr_pin_result;
+ u32 fw_phy_io_pin_result;
+ u32 fw_rf_pin_result;
+ u32 host_pin_result;
+};
+
+struct cnss_plat_data {
+ struct platform_device *plat_dev;
+ void *bus_priv;
+ struct cnss_vreg_info *vreg_info;
+ struct cnss_pinctrl_info pinctrl_info;
+ struct cnss_subsys_info subsys_info;
+ struct cnss_ramdump_info ramdump_info;
+ struct cnss_ramdump_info_v2 ramdump_info_v2;
+ struct cnss_esoc_info esoc_info;
+ struct cnss_bus_bw_info bus_bw_info;
+ struct notifier_block modem_nb;
+ struct cnss_platform_cap cap;
+ struct pm_qos_request qos_request;
+ unsigned long device_id;
+ struct cnss_wlan_driver *driver_ops;
+ enum cnss_driver_status driver_status;
+ u32 recovery_count;
+ struct cnss_wlan_mac_info wlan_mac_info;
+ unsigned long driver_state;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for driver work event handling */
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct qmi_handle *qmi_wlfw_clnt;
+ struct work_struct qmi_recv_msg_work;
+ struct notifier_block qmi_wlfw_clnt_nb;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ struct wlfw_soc_info_s_v01 soc_info;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ struct cnss_fw_mem fw_mem;
+ struct cnss_fw_mem m3_mem;
+ struct cnss_pin_connect_result pin_result;
+ struct dentry *root_dentry;
+ atomic_t pm_count;
+ struct timer_list fw_boot_timer;
+ struct completion power_up_complete;
+};
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev);
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_event_type type,
+ bool sync, void *data);
+int cnss_get_vreg(struct cnss_plat_data *plat_priv);
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
+int cnss_power_on_device(struct cnss_plat_data *plat_priv);
+void cnss_power_off_device(struct cnss_plat_data *plat_priv);
+int cnss_register_subsys(struct cnss_plat_data *plat_priv);
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv);
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
+u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
+
+#endif /* _CNSS_MAIN_H */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
new file mode 100644
index 000000000000..edc39af2d361
--- /dev/null
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -0,0 +1,1612 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/firmware.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+
+#include "main.h"
+#include "debug.h"
+#include "pci.h"
+
+#define PCI_LINK_UP 1
+#define PCI_LINK_DOWN 0
+
+#define SAVE_PCI_CONFIG_SPACE 1
+#define RESTORE_PCI_CONFIG_SPACE 0
+
+#define PM_OPTIONS_DEFAULT 0
+#define PM_OPTIONS_LINK_DOWN \
+ (MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN)
+
+#define PCI_BAR_NUM 0
+
+#ifdef CONFIG_ARM_LPAE
+#define PCI_DMA_MASK 64
+#else
+#define PCI_DMA_MASK 32
+#endif
+
+#define MHI_NODE_NAME "qcom,mhi"
+
+#define MAX_M3_FILE_NAME_LENGTH 13
+#define DEFAULT_M3_FILE_NAME "m3.bin"
+
+static DEFINE_SPINLOCK(pci_link_down_lock);
+
+static unsigned int pci_link_down_panic;
+module_param(pci_link_down_panic, uint, 0600);
+MODULE_PARM_DESC(pci_link_down_panic,
+ "Trigger kernel panic when PCI link down is detected");
+
+static bool fbc_bypass;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(fbc_bypass, bool, 0600);
+MODULE_PARM_DESC(fbc_bypass,
+ "Bypass firmware download when loading WLAN driver");
+#endif
+
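+/*
+ * Save or restore the device's PCI config space around link power
+ * transitions. While a link-down or recovery is in progress, saving is
+ * skipped and restore goes through msm_pcie_recover_config() instead of
+ * the cached state.
+ */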
+static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ bool link_down_or_recovery;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ link_down_or_recovery = pci_priv->pci_link_down_ind ||
+ (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
+
+ if (save) {
+ if (link_down_or_recovery) {
+ pci_priv->saved_state = NULL;
+ } else {
+ pci_save_state(pci_dev);
+ pci_priv->saved_state = pci_store_saved_state(pci_dev);
+ }
+ } else {
+ if (link_down_or_recovery) {
+ ret = msm_pcie_recover_config(pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to recover PCI config space, err = %d\n",
+ ret);
+ return ret;
+ }
+ } else if (pci_priv->saved_state) {
+ pci_load_and_free_saved_state(pci_dev,
+ &pci_priv->saved_state);
+ pci_restore_state(pci_dev);
+ }
+ }
+
+ return 0;
+}
+
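+/*
+ * Bring the PCIe link up or down through msm_pcie_pm_control(). The
+ * link-down PM options (no config restore) are used while a link-down or
+ * recovery is being handled.
+ */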
+static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ bool link_down_or_recovery;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ link_down_or_recovery = pci_priv->pci_link_down_ind ||
+ (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
+
+ ret = msm_pcie_pm_control(link_up ? MSM_PCIE_RESUME :
+ MSM_PCIE_SUSPEND,
+ pci_dev->bus->number,
+ pci_dev, NULL,
+ link_down_or_recovery ?
+ PM_OPTIONS_LINK_DOWN :
+ PM_OPTIONS_DEFAULT);
+ if (ret) {
+ cnss_pr_err("Failed to %s PCI link with %s option, err = %d\n",
+ link_up ? "resume" : "suspend",
+ link_down_or_recovery ? "link down" : "default",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (!pci_priv->pci_link_state) {
+ cnss_pr_info("PCI link is already suspended!\n");
+ goto out;
+ }
+
+ ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
+ if (ret)
+ goto out;
+
+ pci_disable_device(pci_priv->pci_dev);
+
+ ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+
+ ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
+ if (ret)
+ goto out;
+
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
+
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (pci_priv->pci_link_state) {
+ cnss_pr_info("PCI link is already resumed!\n");
+ goto out;
+ }
+
+ ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
+ if (ret)
+ goto out;
+
+ pci_priv->pci_link_state = PCI_LINK_UP;
+
+ ret = pci_enable_device(pci_priv->pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ if (ret)
+ goto out;
+
+ pci_set_master(pci_priv->pci_dev);
+
+ if (pci_priv->pci_link_down_ind)
+ pci_priv->pci_link_down_ind = false;
+
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_pci_link_down(struct device *dev)
+{
+ unsigned long flags;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (pci_link_down_panic)
+ panic("cnss: PCI link is down!\n");
+
+ spin_lock_irqsave(&pci_link_down_lock, flags);
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n");
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+ return -EINVAL;
+ }
+ pci_priv->pci_link_down_ind = true;
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+ cnss_pr_err("PCI link down is detected by host driver, schedule recovery!\n");
+
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR);
+ cnss_schedule_recovery(dev, CNSS_REASON_LINK_DOWN);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_link_down);
+
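+/*
+ * Create an ARM IOMMU mapping over the IOVA range provided by device tree
+ * and attach the PCI device to it with the atomic and S1-bypass domain
+ * attributes set.
+ */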
+static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ int atomic_ctx = 1;
+ int s1_bypass = 1;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ pci_priv->smmu_iova_start,
+ pci_priv->smmu_iova_len);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ cnss_pr_err("Failed to create SMMU mapping, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx);
+ if (ret) {
+ pr_err("Failed to set SMMU atomic_ctx attribute, err = %d\n",
+ ret);
+ goto release_mapping;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (ret) {
+ pr_err("Failed to set SMMU s1_bypass attribute, err = %d\n",
+ ret);
+ goto release_mapping;
+ }
+
+ ret = arm_iommu_attach_device(dev, mapping);
+ if (ret) {
+ pr_err("Failed to attach SMMU device, err = %d\n", ret);
+ goto release_mapping;
+ }
+
+ pci_priv->smmu_mapping = mapping;
+
+ return ret;
+release_mapping:
+ arm_iommu_release_mapping(mapping);
+out:
+ return ret;
+}
+
+static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
+{
+ arm_iommu_detach_device(&pci_priv->pci_dev->dev);
+ arm_iommu_release_mapping(pci_priv->smmu_mapping);
+
+ pci_priv->smmu_mapping = NULL;
+}
+
+static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
+{
+ unsigned long flags;
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+
+ if (!notify)
+ return;
+
+ pci_dev = notify->user;
+ if (!pci_dev)
+ return;
+
+ pci_priv = cnss_get_pci_priv(pci_dev);
+ if (!pci_priv)
+ return;
+
+ switch (notify->event) {
+ case MSM_PCIE_EVENT_LINKDOWN:
+ if (pci_link_down_panic)
+ panic("cnss: PCI link is down!\n");
+
+ spin_lock_irqsave(&pci_link_down_lock, flags);
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n");
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+ return;
+ }
+ pci_priv->pci_link_down_ind = true;
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+ cnss_pr_err("PCI link down, schedule recovery!\n");
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR);
+ if (pci_dev->device == QCA6174_DEVICE_ID)
+ disable_irq(pci_dev->irq);
+ cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
+ break;
+ case MSM_PCIE_EVENT_WAKEUP:
+ if (cnss_pci_get_monitor_wake_intr(pci_priv) &&
+ cnss_pci_get_auto_suspended(pci_priv)) {
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ pm_request_resume(&pci_dev->dev);
+ }
+ break;
+ default:
+ cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
+ }
+}
+
+static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct msm_pcie_register_event *pci_event;
+
+ pci_event = &pci_priv->msm_pci_event;
+ pci_event->events = MSM_PCIE_EVENT_LINKDOWN |
+ MSM_PCIE_EVENT_WAKEUP;
+ pci_event->user = pci_priv->pci_dev;
+ pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
+ pci_event->callback = cnss_pci_event_cb;
+ pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
+
+ ret = msm_pcie_register_event(pci_event);
+ if (ret)
+ cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
+ ret);
+
+ return ret;
+}
+
+static void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ msm_pcie_deregister_event(&pci_priv->msm_pci_event);
+}
+
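+/*
+ * System suspend callback. The registered WLAN driver's suspend op is
+ * called first; while the PCIe link is up, MHI is suspended, the config
+ * space is saved, the device is disabled and put into D3hot.
+ */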
+static int cnss_pci_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ pm_message_t state = { .event = PM_EVENT_SUSPEND };
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->suspend) {
+ ret = driver_ops->suspend(pci_dev, state);
+ if (pci_priv->pci_link_state) {
+ if (cnss_pci_set_mhi_state(pci_priv,
+ CNSS_MHI_SUSPEND)) {
+ driver_ops->resume(pci_dev);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ cnss_set_pci_config_space(pci_priv,
+ SAVE_PCI_CONFIG_SPACE);
+ pci_disable_device(pci_dev);
+
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n",
+ ret);
+ }
+ }
+
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+
+out:
+ return ret;
+}
+
+static int cnss_pci_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->resume && !pci_priv->pci_link_down_ind) {
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to enable PCI device, err = %d\n",
+ ret);
+
+ if (pci_priv->saved_state)
+ cnss_set_pci_config_space(pci_priv,
+ RESTORE_PCI_CONFIG_SPACE);
+
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+
+ ret = driver_ops->resume(pci_dev);
+ }
+
+out:
+ return ret;
+}
+
+static int cnss_pci_suspend_noirq(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->suspend_noirq)
+ ret = driver_ops->suspend_noirq(pci_dev);
+
+out:
+ return ret;
+}
+
+static int cnss_pci_resume_noirq(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->resume_noirq &&
+ !pci_priv->pci_link_down_ind)
+ ret = driver_ops->resume_noirq(pci_dev);
+
+out:
+ return ret;
+}
+
+static int cnss_pci_runtime_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -EAGAIN;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -EAGAIN;
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress!\n");
+ return -EAGAIN;
+ }
+
+ cnss_pr_dbg("Runtime suspend start\n");
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->runtime_ops &&
+ driver_ops->runtime_ops->runtime_suspend)
+ ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
+
+ cnss_pr_info("Runtime suspend status: %d\n", ret);
+
+ return ret;
+}
+
+static int cnss_pci_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -EAGAIN;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -EAGAIN;
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress!\n");
+ return -EAGAIN;
+ }
+
+ cnss_pr_dbg("Runtime resume start\n");
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->runtime_ops &&
+ driver_ops->runtime_ops->runtime_resume)
+ ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
+
+ cnss_pr_info("Runtime resume status: %d\n", ret);
+
+ return ret;
+}
+
+static int cnss_pci_runtime_idle(struct device *dev)
+{
+ cnss_pr_dbg("Runtime idle\n");
+
+ pm_request_autosuspend(dev);
+
+ return -EBUSY;
+}
+
+int cnss_wlan_pm_control(bool vote)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_pci_data *pci_priv;
+ struct pci_dev *pci_dev;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+
+ return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
+ MSM_PCIE_ENABLE_PC,
+ pci_dev->bus->number, pci_dev,
+ NULL, PM_OPTIONS_DEFAULT);
+}
+EXPORT_SYMBOL(cnss_wlan_pm_control);
+
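+/*
+ * Auto-suspend path exported to the WLAN driver: suspend MHI, save the
+ * config space, drop the device to D3hot, take the PCIe link down and
+ * remove the bus bandwidth vote.
+ */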
+int cnss_auto_suspend(void)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+
+ if (pci_priv->pci_link_state) {
+ if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
+ pci_disable_device(pci_dev);
+
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+ if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
+ cnss_pr_err("Failed to shutdown PCI link!\n");
+ ret = -EAGAIN;
+ goto resume_mhi;
+ }
+ }
+
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
+ cnss_pci_set_auto_suspended(pci_priv, 1);
+ cnss_pci_set_monitor_wake_intr(pci_priv, true);
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+ msm_bus_scale_client_update_request(bus_bw_info->bus_client,
+ CNSS_BUS_WIDTH_NONE);
+
+ return 0;
+
+resume_mhi:
+ if (pci_enable_device(pci_dev))
+ cnss_pr_err("Failed to enable PCI device!\n");
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_auto_suspend);
+
+int cnss_auto_resume(void)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+ if (!pci_priv->pci_link_state) {
+ if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
+ cnss_pr_err("Failed to resume PCI link!\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ pci_priv->pci_link_state = PCI_LINK_UP;
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to enable PCI device, err = %d\n",
+ ret);
+ }
+
+ cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+ msm_bus_scale_client_update_request(bus_bw_info->bus_client,
+ bus_bw_info->current_bw_vote);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_auto_resume);
+
+int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+
+ if (!fw_mem->va && fw_mem->size) {
+ fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ fw_mem->size, &fw_mem->pa,
+ GFP_KERNEL);
+ if (!fw_mem->va) {
+ cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n",
+ fw_mem->size);
+ fw_mem->size = 0;
+
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+
+ if (fw_mem->va && fw_mem->size) {
+ cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ fw_mem->va, &fw_mem->pa, fw_mem->size);
+ dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size,
+ fw_mem->va, fw_mem->pa);
+ fw_mem->va = NULL;
+ fw_mem->pa = 0;
+ fw_mem->size = 0;
+ }
+}
+
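+/*
+ * Load the M3 firmware image (m3.bin by default) with request_firmware()
+ * and copy it into a DMA-coherent buffer tracked in plat_priv->m3_mem.
+ */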
+int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+ char filename[MAX_M3_FILE_NAME_LENGTH];
+ const struct firmware *fw_entry;
+ int ret = 0;
+
+ if (!m3_mem->va && !m3_mem->size) {
+ snprintf(filename, sizeof(filename), DEFAULT_M3_FILE_NAME);
+
+ ret = request_firmware(&fw_entry, filename,
+ &pci_priv->pci_dev->dev);
+ if (ret) {
+ cnss_pr_err("Failed to load M3 image: %s\n", filename);
+ return ret;
+ }
+
+ m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ fw_entry->size, &m3_mem->pa,
+ GFP_KERNEL);
+ if (!m3_mem->va) {
+ cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
+ fw_entry->size);
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
+ m3_mem->size = fw_entry->size;
+ release_firmware(fw_entry);
+ }
+
+ return 0;
+}
+
+static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+
+ if (m3_mem->va && m3_mem->size) {
+ cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ m3_mem->va, &m3_mem->pa, m3_mem->size);
+ dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
+ m3_mem->va, m3_mem->pa);
+ }
+
+ m3_mem->va = NULL;
+ m3_mem->pa = 0;
+ m3_mem->size = 0;
+}
+
+int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
+ phys_addr_t *pa)
+{
+ if (!pci_priv)
+ return -ENODEV;
+
+ *va = pci_priv->bar;
+ *pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
+
+ return 0;
+}
+
+#ifdef CONFIG_CNSS_QCA6290
+#define PCI_MAX_BAR_SIZE 0xD00000
+
+static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = PCI_MAX_BAR_SIZE;
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+
+ if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
+ if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO))
+ return ioremap(start, len);
+ else
+ return ioremap_nocache(start, len);
+ }
+
+ return NULL;
+}
+#else
+static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
+ unsigned long maxlen)
+{
+ return pci_iomap(dev, bar, maxlen);
+}
+#endif
+
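+/* Static MSI layout: 32 vectors shared between MHI, CE, WAKE and DP users. */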
+static struct cnss_msi_config msi_config = {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct cnss_msi_user[]) {
+ { .name = "MHI", .num_vectors = 2, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 11, .base_vector = 2 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+};
+
+static int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
+{
+ pci_priv->msi_config = &msi_config;
+
+ return 0;
+}
+
+static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ int num_vectors;
+ struct cnss_msi_config *msi_config;
+ struct msi_desc *msi_desc;
+
+ ret = cnss_pci_get_msi_assignment(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
+ goto out;
+ }
+
+ msi_config = pci_priv->msi_config;
+ if (!msi_config) {
+ cnss_pr_err("msi_config is NULL!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ num_vectors = pci_enable_msi_range(pci_dev,
+ msi_config->total_vectors,
+ msi_config->total_vectors);
+ if (num_vectors != msi_config->total_vectors) {
+ cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
+ msi_config->total_vectors, num_vectors);
+ ret = -EINVAL;
+ goto reset_msi_config;
+ }
+
+ msi_desc = irq_get_msi_desc(pci_dev->irq);
+ if (!msi_desc) {
+ cnss_pr_err("msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto disable_msi;
+ }
+
+ pci_priv->msi_ep_base_data = msi_desc->msg.data;
+ if (!pci_priv->msi_ep_base_data) {
+ cnss_pr_err("Got 0 MSI base data!\n");
+ CNSS_ASSERT(0);
+ }
+
+ cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
+
+ return 0;
+
+disable_msi:
+ pci_disable_msi(pci_priv->pci_dev);
+reset_msi_config:
+ pci_priv->msi_config = NULL;
+out:
+ return ret;
+}
+
+static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
+{
+ pci_disable_msi(pci_priv->pci_dev);
+}
+
+int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct cnss_pci_data *pci_priv = dev_get_drvdata(dev);
+ struct cnss_msi_config *msi_config;
+ int idx;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ msi_config = pci_priv->msi_config;
+ if (!msi_config) {
+ cnss_pr_err("MSI is not supported.\n");
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *user_base_data = msi_config->users[idx].base_vector
+ + pci_priv->msi_ep_base_data;
+ *base_vector = msi_config->users[idx].base_vector;
+
+ cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(cnss_get_user_msi_assignment);
+
+int cnss_get_msi_irq(struct device *dev, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ return pci_dev->irq + vector;
+}
+EXPORT_SYMBOL(cnss_get_msi_irq);
+
+void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
+ u32 *msi_addr_high)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ msi_addr_low);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ msi_addr_high);
+}
+EXPORT_SYMBOL(cnss_get_msi_address);
+
+static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ u16 device_id;
+
+ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
+ if (device_id != pci_priv->pci_device_id->device) {
+ cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
+ device_id, pci_priv->pci_device_id->device);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
+ if (ret) {
+ pr_err("Failed to assign PCI resource, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
+ if (ret) {
+ cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+ if (ret) {
+ cnss_pr_err("Failed to set PCI DMA mask (%d), err = %d\n",
+ PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+ if (ret) {
+ cnss_pr_err("Failed to set PCI consistent DMA mask (%d), err = %d\n",
+ PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ pci_set_master(pci_dev);
+
+ pci_priv->bar = cnss_pci_iomap(pci_dev, PCI_BAR_NUM, 0);
+ if (!pci_priv->bar) {
+ cnss_pr_err("Failed to do PCI IO map!\n");
+ ret = -EIO;
+ goto clear_master;
+ }
+ return 0;
+
+clear_master:
+ pci_clear_master(pci_dev);
+release_region:
+ pci_release_region(pci_dev, PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pci_dev);
+out:
+ return ret;
+}
+
+static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+
+ if (pci_priv->bar) {
+ pci_iounmap(pci_dev, pci_priv->bar);
+ pci_priv->bar = NULL;
+ }
+
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, PCI_BAR_NUM);
+ pci_disable_device(pci_dev);
+}
+
+static int cnss_mhi_pm_runtime_get(struct pci_dev *pci_dev)
+{
+ return pm_runtime_get(&pci_dev->dev);
+}
+
+static void cnss_mhi_pm_runtime_put_noidle(struct pci_dev *pci_dev)
+{
+ pm_runtime_put_noidle(&pci_dev->dev);
+}
+
+static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ return "INIT";
+ case CNSS_MHI_DEINIT:
+ return "DEINIT";
+ case CNSS_MHI_POWER_ON:
+ return "POWER_ON";
+ case CNSS_MHI_POWER_OFF:
+ return "POWER_OFF";
+ case CNSS_MHI_SUSPEND:
+ return "SUSPEND";
+ case CNSS_MHI_RESUME:
+ return "RESUME";
+ case CNSS_MHI_TRIGGER_RDDM:
+ return "TRIGGER_RDDM";
+ case CNSS_MHI_RDDM:
+ return "RDDM";
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ return "RDDM_KERNEL_PANIC";
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ return "NOTIFY_LINK_ERROR";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void *cnss_pci_collect_dump_seg(struct cnss_pci_data *pci_priv,
+ enum mhi_rddm_segment type,
+ void *start_addr)
+{
+ int count;
+ struct scatterlist *sg_list, *s;
+ unsigned int i;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_dump_data *dump_data =
+ &plat_priv->ramdump_info_v2.dump_data;
+ struct cnss_dump_seg *dump_seg = start_addr;
+
+ count = mhi_xfer_rddm(&pci_priv->mhi_dev, type, &sg_list);
+ if (count <= 0 || !sg_list) {
+ cnss_pr_err("Invalid dump_seg for type %u, count %u, sg_list %pK\n",
+ type, count, sg_list);
+ return start_addr;
+ }
+
+ cnss_pr_dbg("Collect dump seg: type %u, nentries %d\n", type, count);
+
+ for_each_sg(sg_list, s, count, i) {
+ dump_seg->address = sg_dma_address(s);
+ dump_seg->v_address = sg_virt(s);
+ dump_seg->size = s->length;
+ dump_seg->type = type;
+ cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n",
+ i, dump_seg->address,
+ dump_seg->v_address, dump_seg->size);
+ dump_seg++;
+ }
+
+ dump_data->nentries += count;
+
+ return dump_seg;
+}
+
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_dump_data *dump_data =
+ &plat_priv->ramdump_info_v2.dump_data;
+ void *start_addr, *end_addr;
+
+ dump_data->nentries = 0;
+
+ start_addr = plat_priv->ramdump_info_v2.dump_data_vaddr;
+ end_addr = cnss_pci_collect_dump_seg(pci_priv,
+ MHI_RDDM_FW_SEGMENT, start_addr);
+
+ start_addr = end_addr;
+ end_addr = cnss_pci_collect_dump_seg(pci_priv,
+ MHI_RDDM_RD_SEGMENT, start_addr);
+
+ if (dump_data->nentries > 0)
+ plat_priv->ramdump_info_v2.dump_data_valid = true;
+}
+
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ plat_priv->ramdump_info_v2.dump_data.nentries = 0;
+ plat_priv->ramdump_info_v2.dump_data_valid = false;
+}
+
+static void cnss_mhi_notify_status(enum MHI_CB_REASON reason, void *priv)
+{
+ struct cnss_pci_data *pci_priv = priv;
+ struct cnss_plat_data *plat_priv;
+ enum cnss_recovery_reason cnss_reason = CNSS_REASON_RDDM;
+
+ if (!pci_priv)
+ return;
+
+ plat_priv = pci_priv->plat_priv;
+
+ cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
+
+ set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ del_timer(&plat_priv->fw_boot_timer);
+
+ if (reason == MHI_CB_SYS_ERROR)
+ cnss_reason = CNSS_REASON_TIMEOUT;
+
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ cnss_reason);
+}
+
+static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct mhi_device *mhi_dev = &pci_priv->mhi_dev;
+
+ mhi_dev->dev = &pci_priv->plat_priv->plat_dev->dev;
+ mhi_dev->pci_dev = pci_dev;
+
+ mhi_dev->resources[0].start = (resource_size_t)pci_priv->bar;
+ mhi_dev->resources[0].end = (resource_size_t)pci_priv->bar +
+ pci_resource_len(pci_dev, PCI_BAR_NUM);
+ mhi_dev->resources[0].flags =
+ pci_resource_flags(pci_dev, PCI_BAR_NUM);
+ mhi_dev->resources[0].name = "BAR";
+ cnss_pr_dbg("BAR start is %pa, BAR end is %pa\n",
+ &mhi_dev->resources[0].start, &mhi_dev->resources[0].end);
+
+ if (!mhi_dev->resources[1].start) {
+ mhi_dev->resources[1].start = pci_dev->irq;
+ mhi_dev->resources[1].end = pci_dev->irq + 1;
+ mhi_dev->resources[1].flags = IORESOURCE_IRQ;
+ mhi_dev->resources[1].name = "IRQ";
+ }
+ cnss_pr_dbg("IRQ start is %pa, IRQ end is %pa\n",
+ &mhi_dev->resources[1].start, &mhi_dev->resources[1].end);
+
+ mhi_dev->pm_runtime_get = cnss_mhi_pm_runtime_get;
+ mhi_dev->pm_runtime_put_noidle = cnss_mhi_pm_runtime_put_noidle;
+
+ mhi_dev->support_rddm = true;
+ mhi_dev->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
+ mhi_dev->status_cb = cnss_mhi_notify_status;
+
+ ret = mhi_register_device(mhi_dev, MHI_NODE_NAME, pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to register as MHI device, err = %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
+{
+}
+
+static enum mhi_dev_ctrl cnss_to_mhi_dev_state(enum cnss_mhi_state state)
+{
+ switch (state) {
+ case CNSS_MHI_INIT:
+ return MHI_DEV_CTRL_INIT;
+ case CNSS_MHI_DEINIT:
+ return MHI_DEV_CTRL_DE_INIT;
+ case CNSS_MHI_POWER_ON:
+ return MHI_DEV_CTRL_POWER_ON;
+ case CNSS_MHI_POWER_OFF:
+ return MHI_DEV_CTRL_POWER_OFF;
+ case CNSS_MHI_SUSPEND:
+ return MHI_DEV_CTRL_SUSPEND;
+ case CNSS_MHI_RESUME:
+ return MHI_DEV_CTRL_RESUME;
+ case CNSS_MHI_TRIGGER_RDDM:
+ return MHI_DEV_CTRL_TRIGGER_RDDM;
+ case CNSS_MHI_RDDM:
+ return MHI_DEV_CTRL_RDDM;
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ return MHI_DEV_CTRL_RDDM_KERNEL_PANIC;
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ return MHI_DEV_CTRL_NOTIFY_LINK_ERROR;
+ default:
+ cnss_pr_err("Unknown CNSS MHI state (%d)\n", state);
+ return -EINVAL;
+ }
+}
+
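+/*
+ * Check whether the requested MHI state transition is allowed given the
+ * state bits currently recorded in pci_priv->mhi_state.
+ */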
+static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_DEINIT:
+ case CNSS_MHI_POWER_ON:
+ if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
+ !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_POWER_OFF:
+ case CNSS_MHI_SUSPEND:
+ if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+ !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_RESUME:
+ if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_TRIGGER_RDDM:
+ case CNSS_MHI_RDDM:
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ return 0;
+ default:
+ cnss_pr_err("Unhandled MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+ }
+
+ cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state,
+ pci_priv->mhi_state);
+
+ return -EINVAL;
+}
+
+static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_DEINIT:
+ clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_POWER_ON:
+ set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_POWER_OFF:
+ clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_SUSPEND:
+ set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_RESUME:
+ clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_TRIGGER_RDDM:
+ case CNSS_MHI_RDDM:
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ break;
+ default:
+ cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
+ }
+}
+
+int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ int ret = 0;
+ enum mhi_dev_ctrl mhi_dev_state = cnss_to_mhi_dev_state(mhi_state);
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (pci_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (mhi_dev_state < 0) {
+ cnss_pr_err("Invalid MHI DEV state (%d)\n", mhi_dev_state);
+ return -EINVAL;
+ }
+
+ ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
+ if (ret)
+ goto out;
+
+ cnss_pr_dbg("Setting MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+ ret = mhi_pm_control_device(&pci_priv->mhi_dev, mhi_dev_state);
+ if (ret) {
+ cnss_pr_err("Failed to set MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+ goto out;
+ }
+
+ cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
+
+out:
+ return ret;
+}
+
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (fbc_bypass)
+ return 0;
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
+ if (ret)
+ goto out;
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return;
+ }
+
+ if (fbc_bypass)
+ return;
+
+ plat_priv = pci_priv->plat_priv;
+
+ cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
+
+ if (plat_priv->ramdump_info_v2.dump_data_valid ||
+ test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state))
+ return;
+
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+}
+
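+/*
+ * PCI probe. For QCA6290 the probe is deferred until the MHI driver is
+ * ready. It then registers the subsystem and ramdump handlers, sets up the
+ * optional SMMU mapping, PCIe event notifications and the BAR. QCA6174 is
+ * left with its link suspended and power off; QCA6290 additionally gets
+ * MSI enabled and is registered as an MHI device.
+ */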
+static int cnss_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct resource *res;
+
+ cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
+ id->vendor, pci_dev->device);
+
+ switch (pci_dev->device) {
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ if (!mhi_is_device_ready(&plat_priv->plat_dev->dev,
+ MHI_NODE_NAME)) {
+ cnss_pr_err("MHI driver is not ready, defer PCI probe!\n");
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ pci_priv = devm_kzalloc(&pci_dev->dev, sizeof(*pci_priv),
+ GFP_KERNEL);
+ if (!pci_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pci_priv->pci_link_state = PCI_LINK_UP;
+ pci_priv->plat_priv = plat_priv;
+ pci_priv->pci_dev = pci_dev;
+ pci_priv->pci_device_id = id;
+ pci_priv->device_id = pci_dev->device;
+ cnss_set_pci_priv(pci_dev, pci_priv);
+ plat_priv->device_id = pci_dev->device;
+ plat_priv->bus_priv = pci_priv;
+
+ ret = cnss_register_subsys(plat_priv);
+ if (ret)
+ goto reset_ctx;
+
+ ret = cnss_register_ramdump(plat_priv);
+ if (ret)
+ goto unregister_subsys;
+
+ res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
+ "smmu_iova_base");
+ if (res) {
+ pci_priv->smmu_iova_start = res->start;
+ pci_priv->smmu_iova_len = resource_size(res);
+ cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n",
+ &pci_priv->smmu_iova_start,
+ pci_priv->smmu_iova_len);
+
+ ret = cnss_pci_init_smmu(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to init SMMU, err = %d\n", ret);
+ goto unregister_ramdump;
+ }
+ }
+
+ ret = cnss_reg_pci_event(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
+ goto deinit_smmu;
+ }
+
+ ret = cnss_pci_enable_bus(pci_priv);
+ if (ret)
+ goto dereg_pci_event;
+
+ switch (pci_dev->device) {
+ case QCA6174_DEVICE_ID:
+ pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
+ &pci_priv->revision_id);
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n",
+ ret);
+ cnss_power_off_device(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_pci_enable_msi(pci_priv);
+ if (ret)
+ goto disable_bus;
+ ret = cnss_pci_register_mhi(pci_priv);
+ if (ret) {
+ cnss_pci_disable_msi(pci_priv);
+ goto disable_bus;
+ }
+ break;
+ default:
+ cnss_pr_err("Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -ENODEV;
+ goto disable_bus;
+ }
+
+ return 0;
+
+disable_bus:
+ cnss_pci_disable_bus(pci_priv);
+dereg_pci_event:
+ cnss_dereg_pci_event(pci_priv);
+deinit_smmu:
+ if (pci_priv->smmu_mapping)
+ cnss_pci_deinit_smmu(pci_priv);
+unregister_ramdump:
+ cnss_unregister_ramdump(plat_priv);
+unregister_subsys:
+ cnss_unregister_subsys(plat_priv);
+reset_ctx:
+ plat_priv->bus_priv = NULL;
+out:
+ return ret;
+}
+
+static void cnss_pci_remove(struct pci_dev *pci_dev)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv =
+ cnss_bus_dev_to_plat_priv(&pci_dev->dev);
+
+ cnss_pci_free_m3_mem(pci_priv);
+ cnss_pci_free_fw_mem(pci_priv);
+
+ switch (pci_dev->device) {
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ cnss_pci_unregister_mhi(pci_priv);
+ cnss_pci_disable_msi(pci_priv);
+ break;
+ default:
+ break;
+ }
+
+ cnss_pci_disable_bus(pci_priv);
+ cnss_dereg_pci_event(pci_priv);
+ if (pci_priv->smmu_mapping)
+ cnss_pci_deinit_smmu(pci_priv);
+ cnss_unregister_ramdump(plat_priv);
+ cnss_unregister_subsys(plat_priv);
+ plat_priv->bus_priv = NULL;
+}
+
+static const struct pci_device_id cnss_pci_id_table[] = {
+ { QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { QCA6290_EMULATION_VENDOR_ID, QCA6290_EMULATION_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
+
+static const struct dev_pm_ops cnss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
+ cnss_pci_resume_noirq)
+ SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
+ cnss_pci_runtime_idle)
+};
+
+struct pci_driver cnss_pci_driver = {
+ .name = "cnss_pci",
+ .id_table = cnss_pci_id_table,
+ .probe = cnss_pci_probe,
+ .remove = cnss_pci_remove,
+ .driver = {
+ .pm = &cnss_pm_ops,
+ },
+};
+
+int cnss_pci_init(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev = &plat_priv->plat_dev->dev;
+ u32 rc_num;
+
+ ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num);
+ if (ret) {
+ cnss_pr_err("Failed to find PCIe RC number, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = msm_pcie_enumerate(rc_num);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
+ rc_num, ret);
+ goto out;
+ }
+
+ ret = pci_register_driver(&cnss_pci_driver);
+ if (ret) {
+ cnss_pr_err("Failed to register to PCI framework, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
+{
+ pci_unregister_driver(&cnss_pci_driver);
+}
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
new file mode 100644
index 000000000000..4dc29c3c1f10
--- /dev/null
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_PCI_H
+#define _CNSS_PCI_H
+
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/msm_mhi.h>
+#include <linux/msm_pcie.h>
+#include <linux/pci.h>
+
+#include "main.h"
+
+#define QCA6174_VENDOR_ID 0x168C
+#define QCA6174_DEVICE_ID 0x003E
+#define QCA6174_REV_ID_OFFSET 0x08
+#define QCA6174_REV3_VERSION 0x5020000
+#define QCA6174_REV3_2_VERSION 0x5030000
+#define QCA6290_VENDOR_ID 0x17CB
+#define QCA6290_DEVICE_ID 0x1100
+#define QCA6290_EMULATION_VENDOR_ID 0x168C
+#define QCA6290_EMULATION_DEVICE_ID 0xABCD
+
+enum cnss_mhi_state {
+ CNSS_MHI_INIT,
+ CNSS_MHI_DEINIT,
+ CNSS_MHI_SUSPEND,
+ CNSS_MHI_RESUME,
+ CNSS_MHI_POWER_OFF,
+ CNSS_MHI_POWER_ON,
+ CNSS_MHI_TRIGGER_RDDM,
+ CNSS_MHI_RDDM,
+ CNSS_MHI_RDDM_KERNEL_PANIC,
+ CNSS_MHI_NOTIFY_LINK_ERROR,
+};
+
+struct cnss_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct cnss_msi_config {
+ int total_vectors;
+ int total_users;
+ struct cnss_msi_user *users;
+};
+
+struct cnss_pci_data {
+ struct pci_dev *pci_dev;
+ struct cnss_plat_data *plat_priv;
+ const struct pci_device_id *pci_device_id;
+ u32 device_id;
+ u16 revision_id;
+ bool pci_link_state;
+ bool pci_link_down_ind;
+ struct pci_saved_state *saved_state;
+ struct msm_pcie_register_event msm_pci_event;
+ atomic_t auto_suspended;
+ bool monitor_wake_intr;
+ struct dma_iommu_mapping *smmu_mapping;
+ dma_addr_t smmu_iova_start;
+ size_t smmu_iova_len;
+ void __iomem *bar;
+ struct cnss_msi_config *msi_config;
+ u32 msi_ep_base_data;
+ struct mhi_device mhi_dev;
+ unsigned long mhi_state;
+};
+
+static inline void cnss_set_pci_priv(struct pci_dev *pci_dev, void *data)
+{
+ pci_set_drvdata(pci_dev, data);
+}
+
+static inline struct cnss_pci_data *cnss_get_pci_priv(struct pci_dev *pci_dev)
+{
+ return pci_get_drvdata(pci_dev);
+}
+
+static inline struct cnss_plat_data *cnss_pci_priv_to_plat_priv(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return pci_priv->plat_priv;
+}
+
+static inline void cnss_pci_set_monitor_wake_intr(void *bus_priv, bool val)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ pci_priv->monitor_wake_intr = val;
+}
+
+static inline bool cnss_pci_get_monitor_wake_intr(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return pci_priv->monitor_wake_intr;
+}
+
+static inline void cnss_pci_set_auto_suspended(void *bus_priv, int val)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ atomic_set(&pci_priv->auto_suspended, val);
+}
+
+static inline int cnss_pci_get_auto_suspended(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return atomic_read(&pci_priv->auto_suspended);
+}
+
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_pci_init(struct cnss_plat_data *plat_priv);
+void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
+int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
+int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
+ phys_addr_t *pa);
+int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state state);
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
+void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv);
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv);
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
+
+#endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c
new file mode 100644
index 000000000000..8ed1507bde11
--- /dev/null
+++ b/drivers/net/wireless/cnss2/power.c
@@ -0,0 +1,386 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include "main.h"
+#include "debug.h"
+
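+/*
+ * Default regulator table. Judging by how cnss_get_vreg() fills these
+ * entries from device tree, the numeric columns appear to be min_uv,
+ * max_uv, load_ua and delay_us.
+ */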
+static struct cnss_vreg_info cnss_vreg_info[] = {
+ {NULL, "vdd-wlan-core", 1300000, 1300000, 0, 0},
+ {NULL, "vdd-wlan-io", 1800000, 1800000, 0, 0},
+ {NULL, "vdd-wlan-xtal-aon", 0, 0, 0, 0},
+ {NULL, "vdd-wlan-xtal", 1800000, 1800000, 0, 2},
+ {NULL, "vdd-wlan", 0, 0, 0, 0},
+ {NULL, "vdd-wlan-sp2t", 2700000, 2700000, 0, 0},
+ {NULL, "wlan-ant-switch", 2700000, 2700000, 20000, 0},
+ {NULL, "wlan-soc-swreg", 1200000, 1200000, 0, 0},
+ {NULL, "vdd-wlan-en", 0, 0, 0, 10},
+};
+
+#define CNSS_VREG_INFO_SIZE ARRAY_SIZE(cnss_vreg_info)
+#define MAX_PROP_SIZE 32
+
+#define BOOTSTRAP_GPIO "qcom,enable-bootstrap-gpio"
+#define BOOTSTRAP_ACTIVE "bootstrap_active"
+#define WLAN_EN_GPIO "wlan-en-gpio"
+#define WLAN_EN_ACTIVE "wlan_en_active"
+#define WLAN_EN_SLEEP "wlan_en_sleep"
+
+#define BOOTSTRAP_DELAY 1000
+#define WLAN_ENABLE_DELAY 1000
+
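+/*
+ * Populate plat_priv->vreg_info from the cnss_vreg_info table. Each
+ * regulator is looked up as optional; voltage, load and delay can be
+ * overridden through a "qcom,<name>-info" device tree property of four
+ * u32 cells.
+ */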
+int cnss_get_vreg(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ int i;
+ struct cnss_vreg_info *vreg_info;
+ struct device *dev;
+ struct regulator *reg;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+ int len;
+
+ dev = &plat_priv->plat_dev->dev;
+
+ plat_priv->vreg_info = devm_kzalloc(dev, sizeof(cnss_vreg_info),
+ GFP_KERNEL);
+ if (!plat_priv->vreg_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(plat_priv->vreg_info, cnss_vreg_info, sizeof(cnss_vreg_info));
+
+ for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
+ vreg_info = &plat_priv->vreg_info[i];
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret == -ENODEV)
+ continue;
+ else if (ret == -EPROBE_DEFER)
+ cnss_pr_info("EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ else
+ cnss_pr_err("Failed to get regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ goto out;
+ }
+
+ vreg_info->reg = reg;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-info",
+ vreg_info->name);
+
+ prop = of_get_property(dev->of_node, prop_name, &len);
+ cnss_pr_dbg("Got regulator info, name: %s, len: %d\n",
+ prop_name, len);
+
+ if (!prop || len != (4 * sizeof(__be32))) {
+ cnss_pr_dbg("Property %s %s, use default\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ } else {
+ vreg_info->min_uv = be32_to_cpup(&prop[0]);
+ vreg_info->max_uv = be32_to_cpup(&prop[1]);
+ vreg_info->load_ua = be32_to_cpup(&prop[2]);
+ vreg_info->delay_us = be32_to_cpup(&prop[3]);
+ }
+
+ cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u\n",
+ vreg_info->name, vreg_info->min_uv,
+ vreg_info->max_uv, vreg_info->load_ua,
+ vreg_info->delay_us);
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
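+/*
+ * Enable all acquired regulators in table order, applying the configured
+ * voltage, load and delay first. On failure the regulators handled so far
+ * are rolled back.
+ */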
+static int cnss_vreg_on(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_vreg_info *vreg_info;
+ int i;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
+ vreg_info = &plat_priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ cnss_pr_dbg("Regulator %s is being enabled\n", vreg_info->name);
+
+ if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
+ ret = regulator_set_voltage(vreg_info->reg,
+ vreg_info->min_uv,
+ vreg_info->max_uv);
+
+ if (ret) {
+ cnss_pr_err("Failed to set voltage for regulator %s, min_uv: %u, max_uv: %u, err = %d\n",
+ vreg_info->name, vreg_info->min_uv,
+ vreg_info->max_uv, ret);
+ break;
+ }
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+
+ if (ret < 0) {
+ cnss_pr_err("Failed to set load for regulator %s, load: %u, err = %d\n",
+ vreg_info->name, vreg_info->load_ua,
+ ret);
+ break;
+ }
+ }
+
+ if (vreg_info->delay_us)
+ udelay(vreg_info->delay_us);
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ cnss_pr_err("Failed to enable regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ break;
+ }
+ }
+
+ if (ret) {
+ for (; i >= 0; i--) {
+ vreg_info = &plat_priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ if (vreg_info->load_ua)
+ regulator_set_load(vreg_info->reg, 0);
+ if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0)
+ regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_uv);
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cnss_vreg_off(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_vreg_info *vreg_info;
+ int i;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ for (i = CNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
+ vreg_info = &plat_priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ cnss_pr_dbg("Regulator %s is being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ cnss_pr_err("Failed to disable regulator %s, err = %d\n",
+ vreg_info->name, ret);
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ }
+
+ if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_uv);
+ if (ret)
+ cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ }
+ }
+
+ return ret;
+}
+
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_pinctrl_info *pinctrl_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ pinctrl_info = &plat_priv->pinctrl_info;
+
+ pinctrl_info->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+ ret = PTR_ERR(pinctrl_info->pinctrl);
+ cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+ goto out;
+ }
+
+ if (of_find_property(dev->of_node, BOOTSTRAP_GPIO, NULL)) {
+ pinctrl_info->bootstrap_active =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ BOOTSTRAP_ACTIVE);
+ if (IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+ ret = PTR_ERR(pinctrl_info->bootstrap_active);
+ cnss_pr_err("Failed to get bootstrap active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ if (of_find_property(dev->of_node, WLAN_EN_GPIO, NULL)) {
+ pinctrl_info->wlan_en_active =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ WLAN_EN_ACTIVE);
+ if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+ ret = PTR_ERR(pinctrl_info->wlan_en_active);
+ cnss_pr_err("Failed to get wlan_en active state, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ pinctrl_info->wlan_en_sleep =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ WLAN_EN_SLEEP);
+ if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+ ret = PTR_ERR(pinctrl_info->wlan_en_sleep);
+ cnss_pr_err("Failed to get wlan_en sleep state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
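+/*
+ * Drive the bootstrap and WLAN_EN pinctrl states: the active states (with
+ * settle delays) when powering on, the WLAN_EN sleep state when powering
+ * off.
+ */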
+static int cnss_select_pinctrl_state(struct cnss_plat_data *plat_priv,
+ bool state)
+{
+ int ret = 0;
+ struct cnss_pinctrl_info *pinctrl_info;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pinctrl_info = &plat_priv->pinctrl_info;
+
+ if (state) {
+ if (!IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->
+ bootstrap_active);
+ if (ret) {
+ cnss_pr_err("Failed to select bootstrap active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ udelay(BOOTSTRAP_DELAY);
+ }
+
+ if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->
+ wlan_en_active);
+ if (ret) {
+ cnss_pr_err("Failed to select wlan_en active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ udelay(WLAN_ENABLE_DELAY);
+ }
+ } else {
+ if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->wlan_en_sleep);
+ if (ret) {
+ cnss_pr_err("Failed to select wlan_en sleep state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_power_on_device(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = cnss_vreg_on(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to turn on vreg, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_select_pinctrl_state(plat_priv, true);
+ if (ret) {
+ cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret);
+ goto vreg_off;
+ }
+
+ return 0;
+vreg_off:
+ cnss_vreg_off(plat_priv);
+out:
+ return ret;
+}
+
+void cnss_power_off_device(struct cnss_plat_data *plat_priv)
+{
+ cnss_select_pinctrl_state(plat_priv, false);
+ cnss_vreg_off(plat_priv);
+}
+
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv)
+{
+ unsigned long pin_status = 0;
+
+ set_bit(CNSS_WLAN_EN, &pin_status);
+ set_bit(CNSS_PCIE_TXN, &pin_status);
+ set_bit(CNSS_PCIE_TXP, &pin_status);
+ set_bit(CNSS_PCIE_RXN, &pin_status);
+ set_bit(CNSS_PCIE_RXP, &pin_status);
+ set_bit(CNSS_PCIE_REFCLKN, &pin_status);
+ set_bit(CNSS_PCIE_REFCLKP, &pin_status);
+ set_bit(CNSS_PCIE_RST, &pin_status);
+
+ plat_priv->pin_result.host_pin_result = pin_status;
+}
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
new file mode 100644
index 000000000000..d1c0423b4517
--- /dev/null
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -0,0 +1,1037 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "main.h"
+#include "debug.h"
+#include "qmi.h"
+
+#define WLFW_SERVICE_INS_ID_V01 1
+#define WLFW_CLIENT_ID 0x4b4e454c
+#define MAX_BDF_FILE_NAME 11
+#define DEFAULT_BDF_FILE_NAME "bdwlan.elf"
+#define BDF_FILE_NAME_PREFIX "bdwlan.e"
+
+#ifdef CONFIG_CNSS2_DEBUG
+static unsigned int qmi_timeout = 10000;
+module_param(qmi_timeout, uint, 0600);
+MODULE_PARM_DESC(qmi_timeout, "Timeout for QMI message in milliseconds");
+
+#define QMI_WLFW_TIMEOUT_MS qmi_timeout
+#else
+#define QMI_WLFW_TIMEOUT_MS 10000
+#endif
+
+static bool daemon_support;
+module_param(daemon_support, bool, 0600);
+MODULE_PARM_DESC(daemon_support, "Whether user space has cnss-daemon support");
+
+static bool bdf_bypass = true;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(bdf_bypass, bool, 0600);
+MODULE_PARM_DESC(bdf_bypass, "If BDF is not found, send dummy BDF to FW");
+#endif
+
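+/* Values carried in the bdf_type field of the BDF download request:
+ * 0 = binary board data file, 1 = ELF board data file.
+ */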
+enum cnss_bdf_type {
+ CNSS_BDF_BIN,
+ CNSS_BDF_ELF,
+};
+
+static char *cnss_qmi_mode_to_str(enum wlfw_driver_mode_enum_v01 mode)
+{
+ switch (mode) {
+ case QMI_WLFW_MISSION_V01:
+ return "MISSION";
+ case QMI_WLFW_FTM_V01:
+ return "FTM";
+ case QMI_WLFW_EPPING_V01:
+ return "EPPING";
+ case QMI_WLFW_WALTEST_V01:
+ return "WALTEST";
+ case QMI_WLFW_OFF_V01:
+ return "OFF";
+ case QMI_WLFW_CCPM_V01:
+ return "CCPM";
+ case QMI_WLFW_QVIT_V01:
+ return "QVIT";
+ case QMI_WLFW_CALIBRATION_V01:
+ return "CALIBRATION";
+ default:
+ return "UNKNOWN";
+ }
+}
+
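+/* Work item scheduled from the QMI notifier: drain every queued WLFW
+ * message with qmi_recv_msg() until it returns -ENOMSG.
+ */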
+static void cnss_wlfw_clnt_notifier_work(struct work_struct *work)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(work, struct cnss_plat_data, qmi_recv_msg_work);
+ int ret = 0;
+
+ cnss_pr_dbg("Receiving QMI WLFW event in work queue context\n");
+
+ do {
+ ret = qmi_recv_msg(plat_priv->qmi_wlfw_clnt);
+ } while (ret == 0);
+
+ if (ret != -ENOMSG)
+ cnss_pr_err("Error receiving message: %d\n", ret);
+
+ cnss_pr_dbg("Receiving QMI event completed\n");
+}
+
+static void cnss_wlfw_clnt_notifier(struct qmi_handle *handle,
+ enum qmi_event_type event,
+ void *notify_priv)
+{
+ struct cnss_plat_data *plat_priv = notify_priv;
+
+ cnss_pr_dbg("Received QMI WLFW event: %d\n", event);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ switch (event) {
+ case QMI_RECV_MSG:
+ schedule_work(&plat_priv->qmi_recv_msg_work);
+ break;
+ case QMI_SERVER_EXIT:
+ break;
+ default:
+ cnss_pr_dbg("Unhandled QMI event: %d\n", event);
+ break;
+ }
+}
+
+static int cnss_wlfw_clnt_svc_event_notifier(struct notifier_block *nb,
+ unsigned long code, void *_cmd)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(nb, struct cnss_plat_data, qmi_wlfw_clnt_nb);
+ int ret = 0;
+
+ cnss_pr_dbg("Received QMI WLFW service event: %ld\n", code);
+
+ switch (code) {
+ case QMI_SERVER_ARRIVE:
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+ false, NULL);
+ break;
+
+ case QMI_SERVER_EXIT:
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_SERVER_EXIT,
+ false, NULL);
+ break;
+ default:
+ cnss_pr_dbg("Invalid QMI service event: %ld\n", code);
+ break;
+ }
+
+ return ret;
+}
+
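+/* Send the host capability request (daemon support and, when
+ * available, the wake MSI vector) to WLAN firmware and wait for its
+ * response.
+ */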
+static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_host_cap_req_msg_v01 req;
+ struct wlfw_host_cap_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending host capability message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.daemon_support_valid = 1;
+ req.daemon_support = daemon_support;
+
+ cnss_pr_dbg("daemon_support is %d\n", req.daemon_support);
+
+ req.wake_msi = cnss_get_wake_msi(plat_priv);
+ if (req.wake_msi) {
+ cnss_pr_dbg("WAKE MSI base data is %d\n", req.wake_msi);
+ req.wake_msi_valid = 1;
+ }
+
+ req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01;
+ req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_HOST_CAP_RESP_V01;
+ resp_desc.ei_array = wlfw_host_cap_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send host capability request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Host capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
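+/* Register with WLAN firmware for the indications this driver handles:
+ * fw_ready, request_mem, fw_mem_ready, cold_boot_cal_done and
+ * pin_connect_result.
+ */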
+static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_ind_register_req_msg_v01 req;
+ struct wlfw_ind_register_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.client_id_valid = 1;
+ req.client_id = WLFW_CLIENT_ID;
+ req.fw_ready_enable_valid = 1;
+ req.fw_ready_enable = 1;
+ req.request_mem_enable_valid = 1;
+ req.request_mem_enable = 1;
+ req.fw_mem_ready_enable_valid = 1;
+ req.fw_mem_ready_enable = 1;
+ req.cold_boot_cal_done_enable_valid = 1;
+ req.cold_boot_cal_done_enable = 1;
+ req.pin_connect_result_enable_valid = 1;
+ req.pin_connect_result_enable = 1;
+
+ req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
+ req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01;
+ resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send indication register request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Indication register request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
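+/* Decode a REQUEST_MEM indication, record the size firmware asked for
+ * and post CNSS_DRIVER_EVENT_REQUEST_MEM to the driver event handler.
+ */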
+static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv,
+ void *msg, unsigned int msg_len)
+{
+ struct msg_desc ind_desc;
+ struct wlfw_request_mem_ind_msg_v01 ind_msg;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+ int ret = 0;
+
+ ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01;
+ ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN;
+ ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei;
+
+ ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (ret < 0) {
+ cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n",
+ ret, msg_len);
+ return ret;
+ }
+
+ fw_mem->size = ind_msg.size;
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
+ false, NULL);
+
+ return 0;
+}
+
+static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv,
+ void *msg, unsigned int msg_len)
+{
+ struct msg_desc ind_desc;
+ struct wlfw_pin_connect_result_ind_msg_v01 ind_msg;
+ int ret = 0;
+
+ ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
+ ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
+ ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
+
+ ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (ret < 0) {
+ cnss_pr_err("Failed to decode pin connect result indication, msg_len: %u, err = %d\n",
+ msg_len, ret);
+ return ret;
+ }
+ if (ind_msg.pwr_pin_result_valid)
+ plat_priv->pin_result.fw_pwr_pin_result =
+ ind_msg.pwr_pin_result;
+ if (ind_msg.phy_io_pin_result_valid)
+ plat_priv->pin_result.fw_phy_io_pin_result =
+ ind_msg.phy_io_pin_result;
+ if (ind_msg.rf_pin_result_valid)
+ plat_priv->pin_result.fw_rf_pin_result = ind_msg.rf_pin_result;
+
+ cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
+ ind_msg.pwr_pin_result, ind_msg.phy_io_pin_result,
+ ind_msg.rf_pin_result);
+ return ret;
+}
+
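+/* Report the physical address and size of the host-allocated firmware
+ * memory region back to WLAN firmware.
+ */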
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_respond_mem_req_msg_v01 req;
+ struct wlfw_respond_mem_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ if (!fw_mem->pa || !fw_mem->size) {
+ cnss_pr_err("Memory for FW is not available!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ fw_mem->va, &fw_mem->pa, fw_mem->size);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.addr = fw_mem->pa;
+ req.size = fw_mem->size;
+
+ req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01;
+ req_desc.ei_array = wlfw_respond_mem_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01;
+ resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send respond memory request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
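+/* Query target capabilities and cache chip, board, SoC and firmware
+ * version information in plat_priv. A missing board ID is recorded as
+ * 0xFF so the default BDF file name is used later.
+ */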
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_cap_req_msg_v01 req;
+ struct wlfw_cap_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req_desc.max_msg_len = WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_CAP_REQ_V01;
+ req_desc.ei_array = wlfw_cap_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_CAP_RESP_V01;
+ resp_desc.ei_array = wlfw_cap_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send target capability request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Target capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ if (resp.chip_info_valid)
+ plat_priv->chip_info = resp.chip_info;
+ if (resp.board_info_valid)
+ plat_priv->board_info = resp.board_info;
+ else
+ plat_priv->board_info.board_id = 0xFF;
+ if (resp.soc_info_valid)
+ plat_priv->soc_info = resp.soc_info;
+ if (resp.fw_version_info_valid)
+ plat_priv->fw_version_info = resp.fw_version_info;
+
+ cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s",
+ plat_priv->chip_info.chip_id,
+ plat_priv->chip_info.chip_family,
+ plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
+ plat_priv->fw_version_info.fw_version,
+ plat_priv->fw_version_info.fw_build_timestamp);
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
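+/* Load the board data file (bdwlan.e<board_id>, or bdwlan.elf when no
+ * board ID was reported) and stream it to firmware in segments of at
+ * most QMI_WLFW_MAX_DATA_SIZE_V01 bytes. With bdf_bypass set, a dummy
+ * buffer is sent instead when the file cannot be found.
+ */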
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_bdf_download_req_msg_v01 *req;
+ struct wlfw_bdf_download_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ char filename[MAX_BDF_FILE_NAME];
+ const struct firmware *fw_entry;
+ const u8 *temp;
+ unsigned int remaining;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending BDF download message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (plat_priv->board_info.board_id == 0xFF)
+ snprintf(filename, sizeof(filename), DEFAULT_BDF_FILE_NAME);
+ else
+ snprintf(filename, sizeof(filename),
+ BDF_FILE_NAME_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+
+ ret = request_firmware(&fw_entry, filename, &plat_priv->plat_dev->dev);
+ if (ret) {
+ cnss_pr_err("Failed to load BDF: %s\n", filename);
+ if (bdf_bypass) {
+ cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n");
+ temp = filename;
+ remaining = MAX_BDF_FILE_NAME;
+ goto bypass_bdf;
+ } else {
+ goto err_req_fw;
+ }
+ }
+
+ temp = fw_entry->data;
+ remaining = fw_entry->size;
+
+bypass_bdf:
+ cnss_pr_dbg("Downloading BDF: %s, size: %u\n", filename, remaining);
+
+ memset(&resp, 0, sizeof(resp));
+
+ req_desc.max_msg_len = WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_REQ_V01;
+ req_desc.ei_array = wlfw_bdf_download_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_RESP_V01;
+ resp_desc.ei_array = wlfw_bdf_download_resp_msg_v01_ei;
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = plat_priv->board_info.board_id;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+ req->bdf_type_valid = 1;
+ req->bdf_type = CNSS_BDF_ELF;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc,
+ req, sizeof(*req), &resp_desc, &resp,
+ sizeof(resp), QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send BDF download request, err = %d\n",
+ ret);
+ goto err_send;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("BDF download request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto err_send;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+err_send:
+ /* fw_entry is left NULL by request_firmware() on failure, so
+ * releasing it here is a safe no-op on the bdf_bypass path and
+ * avoids leaking a successfully loaded BDF.
+ */
+ release_firmware(fw_entry);
+err_req_fw:
+ kfree(req);
+out:
+ if (ret)
+ CNSS_ASSERT(0);
+ return ret;
+}
+
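+/* Pass the address and size of the pre-allocated M3 memory region to
+ * WLAN firmware via the M3 information request.
+ */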
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_m3_info_req_msg_v01 req;
+ struct wlfw_m3_info_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ if (!m3_mem->pa || !m3_mem->size) {
+ cnss_pr_err("Memory for M3 is not available!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ m3_mem->va, &m3_mem->pa, m3_mem->size);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.addr = plat_priv->m3_mem.pa;
+ req.size = plat_priv->m3_mem.size;
+
+ req_desc.max_msg_len = WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_M3_INFO_REQ_V01;
+ req_desc.ei_array = wlfw_m3_info_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_M3_INFO_RESP_V01;
+ resp_desc.ei_array = wlfw_m3_info_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send M3 information request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("M3 information request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
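+/* Switch WLAN firmware to the requested driver mode. A mode-off
+ * request is skipped while recovery is in progress and tolerated if
+ * the WLFW service has already gone away.
+ */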
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+ enum wlfw_driver_mode_enum_v01 mode)
+{
+ struct wlfw_wlan_mode_req_msg_v01 req;
+ struct wlfw_wlan_mode_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
+ cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);
+
+ if (mode == QMI_WLFW_OFF_V01 &&
+ test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Recovery is in progress, ignore mode off request.\n");
+ return 0;
+ }
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01;
+ req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01;
+ resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ if (mode == QMI_WLFW_OFF_V01 && ret == -ENETRESET) {
+ cnss_pr_dbg("WLFW service is disconnected while sending mode off request.\n");
+ return 0;
+ }
+ cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, resp.resp.result,
+ resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ if (mode != QMI_WLFW_OFF_V01)
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+ struct wlfw_wlan_cfg_req_msg_v01 *data)
+{
+ struct wlfw_wlan_cfg_req_msg_v01 req;
+ struct wlfw_wlan_cfg_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ memcpy(&req, data, sizeof(req));
+
+ req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01;
+ req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01;
+ resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send WLAN config request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("WLAN config request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ struct wlfw_athdiag_read_req_msg_v01 req;
+ struct wlfw_athdiag_read_resp_msg_v01 *resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!plat_priv->qmi_wlfw_clnt)
+ return -EINVAL;
+
+ cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
+ plat_priv->driver_state, offset, mem_type, data_len);
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(req));
+
+ req.offset = offset;
+ req.mem_type = mem_type;
+ req.data_len = data_len;
+
+ req_desc.max_msg_len = WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_ATHDIAG_READ_REQ_V01;
+ req_desc.ei_array = wlfw_athdiag_read_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_ATHDIAG_READ_RESP_V01;
+ resp_desc.ei_array = wlfw_athdiag_read_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, resp, sizeof(*resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send athdiag read request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("athdiag read request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = resp->resp.result;
+ goto out;
+ }
+
+ if (!resp->data_valid || resp->data_len != data_len) {
+ cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n",
+ resp->data_valid, resp->data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, resp->data, resp->data_len);
+
+out:
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ struct wlfw_athdiag_write_req_msg_v01 *req;
+ struct wlfw_athdiag_write_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!plat_priv->qmi_wlfw_clnt)
+ return -EINVAL;
+
+ cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %p\n",
+ plat_priv->driver_state, offset, mem_type, data_len, data);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->offset = offset;
+ req->mem_type = mem_type;
+ req->data_len = data_len;
+ memcpy(req->data, data, data_len);
+
+ req_desc.max_msg_len = WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_REQ_V01;
+ req_desc.ei_array = wlfw_athdiag_write_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_RESP_V01;
+ resp_desc.ei_array = wlfw_athdiag_write_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req,
+ sizeof(*req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send athdiag write request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("athdiag write request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+ u8 fw_log_mode)
+{
+ int ret;
+ struct wlfw_ini_req_msg_v01 req;
+ struct wlfw_ini_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
+ plat_priv->driver_state, fw_log_mode);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.enablefwlog_valid = 1;
+ req.enablefwlog = fw_log_mode;
+
+ req_desc.max_msg_len = WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_INI_REQ_V01;
+ req_desc.ei_array = wlfw_ini_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_INI_RESP_V01;
+ resp_desc.ei_array = wlfw_ini_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Send INI req failed fw_log_mode: %d, ret: %d\n",
+ fw_log_mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n",
+ fw_log_mode, resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
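+/* Indication callback: dispatch WLFW indications either to a local
+ * decoder (request_mem, pin_connect_result) or straight to the driver
+ * event queue (fw_mem_ready, cold_boot_cal_done, fw_ready).
+ */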
+static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
+ unsigned int msg_id, void *msg,
+ unsigned int msg_len, void *ind_cb_priv)
+{
+ struct cnss_plat_data *plat_priv = ind_cb_priv;
+
+ cnss_pr_dbg("Received QMI WLFW indication, msg_id: 0x%x, msg_len: %d\n",
+ msg_id, msg_len);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ switch (msg_id) {
+ case QMI_WLFW_REQUEST_MEM_IND_V01:
+ cnss_wlfw_request_mem_ind_hdlr(plat_priv, msg, msg_len);
+ break;
+ case QMI_WLFW_FW_MEM_READY_IND_V01:
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FW_MEM_READY,
+ false, NULL);
+ break;
+ case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01:
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ false, NULL);
+ break;
+ case QMI_WLFW_FW_READY_IND_V01:
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FW_READY,
+ false, NULL);
+ break;
+ case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
+ cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len);
+ break;
+ default:
+ cnss_pr_err("Invalid QMI WLFW indication, msg_id: 0x%x\n",
+ msg_id);
+ break;
+ }
+}
+
+unsigned int cnss_get_qmi_timeout(void)
+{
+ cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
+
+ return QMI_WLFW_TIMEOUT_MS;
+}
+EXPORT_SYMBOL(cnss_get_qmi_timeout);
+
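+/* WLFW service arrival: create the QMI client handle, connect to the
+ * service, register the indication callback and send the initial host
+ * capability and indication register requests.
+ */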
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ plat_priv->qmi_wlfw_clnt =
+ qmi_handle_create(cnss_wlfw_clnt_notifier, plat_priv);
+ if (!plat_priv->qmi_wlfw_clnt) {
+ cnss_pr_err("Failed to create QMI client handle!\n");
+ ret = -ENOMEM;
+ goto err_create_handle;
+ }
+
+ ret = qmi_connect_to_service(plat_priv->qmi_wlfw_clnt,
+ WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01);
+ if (ret < 0) {
+ cnss_pr_err("Failed to connect to QMI WLFW service, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_register_ind_cb(plat_priv->qmi_wlfw_clnt,
+ cnss_wlfw_clnt_ind, plat_priv);
+ if (ret < 0) {
+ cnss_pr_err("Failed to register QMI WLFW service indication callback, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+ cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ ret = cnss_wlfw_host_cap_send_sync(plat_priv);
+ if (ret < 0)
+ goto out;
+
+ ret = cnss_wlfw_ind_register_send_sync(plat_priv);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+out:
+ qmi_handle_destroy(plat_priv->qmi_wlfw_clnt);
+ plat_priv->qmi_wlfw_clnt = NULL;
+err_create_handle:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ qmi_handle_destroy(plat_priv->qmi_wlfw_clnt);
+ plat_priv->qmi_wlfw_clnt = NULL;
+
+ clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+ cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ return 0;
+}
+
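+/* Register for WLFW service arrive/exit notifications; the actual QMI
+ * connection is set up later from cnss_wlfw_server_arrive().
+ */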
+int cnss_qmi_init(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ INIT_WORK(&plat_priv->qmi_recv_msg_work,
+ cnss_wlfw_clnt_notifier_work);
+
+ plat_priv->qmi_wlfw_clnt_nb.notifier_call =
+ cnss_wlfw_clnt_svc_event_notifier;
+
+ ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01,
+ &plat_priv->qmi_wlfw_clnt_nb);
+ if (ret < 0)
+ cnss_pr_err("Failed to register QMI event notifier, err = %d\n",
+ ret);
+
+ return ret;
+}
+
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
+{
+ qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01,
+ &plat_priv->qmi_wlfw_clnt_nb);
+}
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
new file mode 100644
index 000000000000..70d8d404c48e
--- /dev/null
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_QMI_H
+#define _CNSS_QMI_H
+
+#include "wlan_firmware_service_v01.h"
+
+struct cnss_plat_data;
+
+int cnss_qmi_init(struct cnss_plat_data *plat_priv);
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+ enum wlfw_driver_mode_enum_v01 mode);
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+ struct wlfw_wlan_cfg_req_msg_v01 *data);
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data);
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data);
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+ u8 fw_log_mode);
+
+#endif /* _CNSS_QMI_H */
diff --git a/drivers/net/wireless/cnss2/utils.c b/drivers/net/wireless/cnss2/utils.c
new file mode 100644
index 000000000000..9ffe386e3677
--- /dev/null
+++ b/drivers/net/wireless/cnss2/utils.c
@@ -0,0 +1,129 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define CNSS_MAX_CH_NUM 45
+
+static DEFINE_MUTEX(unsafe_channel_list_lock);
+static DEFINE_MUTEX(dfs_nol_info_lock);
+
+static struct cnss_unsafe_channel_list {
+ u16 unsafe_ch_count;
+ u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+} unsafe_channel_list;
+
+static struct cnss_dfs_nol_info {
+ void *dfs_nol_info;
+ u16 dfs_nol_info_len;
+} dfs_nol_info;
+
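+/* Cache the caller-supplied list of unsafe channels so it can be read
+ * back later with cnss_get_wlan_unsafe_channel().
+ */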
+int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
+{
+ mutex_lock(&unsafe_channel_list_lock);
+ if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ unsafe_channel_list.unsafe_ch_count = ch_count;
+
+ if (ch_count != 0) {
+ memcpy((char *)unsafe_channel_list.unsafe_ch_list,
+ (char *)unsafe_ch_list, ch_count * sizeof(u16));
+ }
+ mutex_unlock(&unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_set_wlan_unsafe_channel);
+
+int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len)
+{
+ mutex_lock(&unsafe_channel_list_lock);
+ if (!unsafe_ch_list || !ch_count) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -ENOMEM;
+ }
+
+ *ch_count = unsafe_channel_list.unsafe_ch_count;
+ memcpy((char *)unsafe_ch_list,
+ (char *)unsafe_channel_list.unsafe_ch_list,
+ unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+ mutex_unlock(&unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_wlan_unsafe_channel);
+
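+/* Keep a copy of the DFS non-occupancy list blob supplied by the
+ * caller; any previously stored blob is freed. cnss_wlan_get_dfs_nol()
+ * copies the blob back and returns the number of bytes copied.
+ */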
+int cnss_wlan_set_dfs_nol(const void *info, u16 info_len)
+{
+ void *temp;
+ struct cnss_dfs_nol_info *dfs_info;
+
+ mutex_lock(&dfs_nol_info_lock);
+ if (!info || !info_len) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -EINVAL;
+ }
+
+ temp = kmalloc(info_len, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -ENOMEM;
+ }
+
+ memcpy(temp, info, info_len);
+ dfs_info = &dfs_nol_info;
+ kfree(dfs_info->dfs_nol_info);
+
+ dfs_info->dfs_nol_info = temp;
+ dfs_info->dfs_nol_info_len = info_len;
+ mutex_unlock(&dfs_nol_info_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_wlan_set_dfs_nol);
+
+int cnss_wlan_get_dfs_nol(void *info, u16 info_len)
+{
+ int len;
+ struct cnss_dfs_nol_info *dfs_info;
+
+ mutex_lock(&dfs_nol_info_lock);
+ if (!info || !info_len) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -EINVAL;
+ }
+
+ dfs_info = &dfs_nol_info;
+
+ if (!dfs_info->dfs_nol_info || dfs_info->dfs_nol_info_len == 0) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -ENOENT;
+ }
+
+ len = min(info_len, dfs_info->dfs_nol_info_len);
+
+ memcpy(info, dfs_info->dfs_nol_info, len);
+ mutex_unlock(&dfs_nol_info_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(cnss_wlan_get_dfs_nol);
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
new file mode 100644
index 000000000000..7d6a771bc0d5
--- /dev/null
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -0,0 +1,2221 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "wlan_firmware_service_v01.h"
+
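+/* Element-info tables describing the QMI TLV layout (type, length and
+ * structure offset) of every WLFW message used by this driver; they
+ * are consumed by the QMI encode/decode helpers.
+ */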
+static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ region_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_soc_info_s_v01,
+ soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cold_boot_cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cold_boot_cal_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = wlfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = wlfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_soc_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = wlfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = wlfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data_len),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_cal_download_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ msa_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01,
+ .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info),
+ .ei_array = wlfw_memory_region_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ini_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_vbatt_req_msg_v01,
+ voltage_uv),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ack_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01,
+ xo_cal_data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
new file mode 100644
index 000000000000..a3081433cc2b
--- /dev/null
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -0,0 +1,657 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef WLAN_FIRMWARE_SERVICE_V01_H
+#define WLAN_FIRMWARE_SERVICE_V01_H
+
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_XO_CAL_IND_V01 0x003D
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 512
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+ WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_MISSION_V01 = 0,
+ QMI_WLFW_FTM_V01 = 1,
+ QMI_WLFW_EPPING_V01 = 2,
+ QMI_WLFW_WALTEST_V01 = 3,
+ QMI_WLFW_OFF_V01 = 4,
+ QMI_WLFW_CCPM_V01 = 5,
+ QMI_WLFW_QVIT_V01 = 6,
+ QMI_WLFW_CALIBRATION_V01 = 7,
+ WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+ WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+ WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pipedir_enum_v01 {
+ WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+ WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+ u32 pipe_num;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+ u32 service_id;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 cold_boot_cal_done_enable_valid;
+ u8 cold_boot_cal_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+};
+
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+ u8 pwr_pin_result_valid;
+ u32 pwr_pin_result;
+ u8 phy_io_pin_result_valid;
+ u32 phy_io_pin_result;
+ u8 rf_pin_result_valid;
+ u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+ enum wlfw_driver_mode_enum_v01 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct wlfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct wlfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct wlfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+extern struct elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
+extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+ u32 meta_data_len;
+ enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+ u8 xo_cal_data_valid;
+ u8 xo_cal_data;
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
+extern struct elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+ u64 msa_addr;
+ u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 mem_region_info_len;
+ struct wlfw_memory_region_info_s_v01
+ mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 524
+extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 531
+extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+ u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+ u8 mac_addr_valid;
+ u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+ u8 daemon_support_valid;
+ u8 daemon_support;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+ u32 size;
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+ u8 cause_for_rejuvenation_valid;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system_valid;
+ u8 requesting_sub_system;
+ u8 line_number_valid;
+ u16 line_number;
+ u8 function_name_valid;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+ u8 mask_valid;
+ u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 prev_mask_valid;
+ u64 prev_mask;
+ u8 curr_mask_valid;
+ u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+struct wlfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_m3_info_req_msg_v01_ei[];
+
+struct wlfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_m3_info_resp_msg_v01_ei[];
+
+struct wlfw_xo_cal_ind_msg_v01 {
+ u8 xo_cal_data;
+};
+
+#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+
+#endif
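
For context, a minimal sketch of how a QMI client would typically wire the generated *_ei tables, message IDs and MAX_MSG_LEN constants above into the msm_qmi_interface API. This is illustrative only: the connected qmi_handle, the 5000 ms timeout and the error handling are assumptions of this sketch, not something defined by this patch.

    #include <linux/errno.h>
    #include "wlan_firmware_service_v01.h"

    /* Illustrative only: issue a WLFW capability request and decode the
     * response. "clnt" is assumed to be a qmi_handle already connected to
     * WLFW_SERVICE_ID_V01; obtaining that handle is outside this sketch.
     */
    static int wlfw_send_cap_req(struct qmi_handle *clnt,
                                 struct wlfw_cap_resp_msg_v01 *resp)
    {
            struct wlfw_cap_req_msg_v01 req = {};
            struct msg_desc req_desc, resp_desc;
            int ret;

            req_desc.msg_id = QMI_WLFW_CAP_REQ_V01;
            req_desc.max_msg_len = WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN;
            req_desc.ei_array = wlfw_cap_req_msg_v01_ei;

            resp_desc.msg_id = QMI_WLFW_CAP_RESP_V01;
            resp_desc.max_msg_len = WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN;
            resp_desc.ei_array = wlfw_cap_resp_msg_v01_ei;

            /* Encodes req through its elem_info table, waits for the reply
             * and decodes it into *resp via wlfw_cap_resp_msg_v01_ei.
             */
            ret = qmi_send_req_wait(clnt, &req_desc, &req, sizeof(req),
                                    &resp_desc, resp, sizeof(*resp),
                                    5000 /* ms, placeholder timeout */);
            if (ret < 0)
                    return ret;

            if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
                    return -EINVAL;

            return 0;
    }

The same pattern repeats for every message pair in this service; only the msg_id, the MAX_MSG_LEN constant and the _ei table change.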
diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c
index a452900868c4..d73846efbc4c 100644
--- a/drivers/net/wireless/cnss_utils/cnss_utils.c
+++ b/drivers/net/wireless/cnss_utils/cnss_utils.c
@@ -283,7 +283,7 @@ static int __init cnss_utils_init(void)
{
struct cnss_utils_priv *priv = NULL;
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 505a9e016777..9347882fba92 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -397,7 +397,7 @@ static struct {
int user_cal_read;
int user_cal_available;
u32 user_cal_rcvd;
- int user_cal_exp_size;
+ u32 user_cal_exp_size;
int iris_xo_mode_set;
int fw_vbatt_state;
char wlan_nv_macAddr[WLAN_MAC_ADDR_SIZE];
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 888e9cfef51a..34a062ccb11d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 044253dca30a..b8a5a8e8f57d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -27,6 +27,13 @@ enum {
NVME_NS_LIGHTNVM = 1,
};
+/* Delay (in ms) required before checking readiness for the
+ * PCI_DEVICE(0x1c58, 0x0003), which needs the
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value was
+ * found empirically.
+ */
+#define NVME_QUIRK_DELAY_AMOUNT 2000
+
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c851bc53831c..4c673d45f1bd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1633,10 +1633,15 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
*/
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
dev->ctrl_config &= ~NVME_CC_SHN_MASK;
dev->ctrl_config &= ~NVME_CC_ENABLE;
writel(dev->ctrl_config, &dev->bar->cc);
+ if (pdev->vendor == 0x1c58 && pdev->device == 0x0003)
+ msleep(NVME_QUIRK_DELAY_AMOUNT);
+
return nvme_wait_ready(dev, cap, false);
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0438512f4d69..ca175710c4c8 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -632,9 +632,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
const char *pathp;
int offset, rc = 0, depth = -1;
- for (offset = fdt_next_node(blob, -1, &depth);
- offset >= 0 && depth >= 0 && !rc;
- offset = fdt_next_node(blob, offset, &depth)) {
+ if (!blob)
+ return 0;
+
+ for (offset = fdt_next_node(blob, -1, &depth);
+ offset >= 0 && depth >= 0 && !rc;
+ offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
if (*pathp == '/')
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 66bdc593f811..333d28e64087 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -194,7 +194,7 @@ config MSM_11AD
tristate "Platform driver for 11ad chip"
depends on PCI
depends on PCI_MSM
- default y
+ default m
---help---
This module adds required platform support for wireless adapter based on
Qualcomm Technologies, Inc. 11ad chip, integrated into MSM platform
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 981129eb9f3a..d5f102eaaac6 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,7 +26,8 @@
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = EP; \
- log_info.id_string = ipa_clients_strings[client]
+ log_info.id_string = (client < 0 || client >= IPA_CLIENT_MAX) \
+ ? "Invalid Client" : ipa_clients_strings[client]
#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
log_info.file = __FILENAME__; \
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 0d17fa58a853..85fa9da50779 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -641,7 +641,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -688,7 +688,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -727,7 +727,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -767,7 +767,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -807,7 +807,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -847,7 +847,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -886,7 +886,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -926,7 +926,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -966,7 +966,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1104,7 +1104,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1149,7 +1149,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1194,7 +1194,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1232,7 +1232,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1372,7 +1372,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1411,7 +1411,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 80514f6c738e..72542bf6dd5d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1017,25 +1017,25 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
if (rule->action != IPA_PASS_TO_EXCEPTION) {
if (!rule->eq_attrib_type) {
if (!rule->rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1048,7 +1048,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
}
INIT_LIST_HEAD(&entry->link);
entry->rule = *rule;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_FLT_COOKIE;
entry->rt_tbl = rt_tbl;
entry->tbl = tbl;
if (add_rear) {
@@ -1067,13 +1067,19 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
-
+ipa_insert_failed:
+ tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
error:
return -EPERM;
}
@@ -1085,12 +1091,12 @@ static int __ipa_del_flt_rule(u32 rule_hdl)
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -1117,12 +1123,12 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
entry = ipa_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1132,25 +1138,25 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1174,7 +1180,7 @@ static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
struct ipa_flt_tbl *tbl;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
return -EINVAL;
}
@@ -1193,14 +1199,14 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
}
ipa_ep_idx = ipa2_get_ep_mapping(ep);
if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
- IPAERR("ep not valid ep=%d\n", ep);
+ IPAERR_RL("ep not valid ep=%d\n", ep);
return -EINVAL;
}
if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
@@ -1227,7 +1233,7 @@ int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1245,7 +1251,7 @@ int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
rules->rules[i].at_rear,
&rules->rules[i].flt_rule_hdl);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1278,14 +1284,14 @@ int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1318,14 +1324,14 @@ int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1359,7 +1365,7 @@ int ipa2_commit_flt(enum ipa_ip_type ip)
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1395,7 +1401,7 @@ int ipa2_reset_flt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 10a49b1e75d8..51806cec1e4d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -547,7 +547,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
{
struct ipa_hdr_entry *hdr_entry;
struct ipa_hdr_proc_ctx_entry *entry;
- struct ipa_hdr_proc_ctx_offset_entry *offset;
+ struct ipa_hdr_proc_ctx_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
int id;
@@ -558,13 +558,13 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa_id_find(proc_ctx->hdr_hdl);
- if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
- IPAERR("hdr_hdl is invalid\n");
+ if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
@@ -580,7 +580,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
entry->hdr = hdr_entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_PROC_HDR_COOKIE;
needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ?
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) :
@@ -592,7 +592,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -602,7 +602,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -640,6 +640,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -647,6 +648,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
+ipa_insert_failed:
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -659,7 +668,7 @@ bad_len:
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
int id;
@@ -667,12 +676,12 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -691,7 +700,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -704,7 +713,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -780,6 +789,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -804,10 +814,19 @@ fail_add_proc_ctx:
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
bad_hdr_len:
@@ -824,8 +843,8 @@ static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl,
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
entry = ipa_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -833,7 +852,7 @@ static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl,
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -871,12 +890,12 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user)
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -888,7 +907,7 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user)
htbl->hdr_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("hdr already deleted by user\n");
+ IPAERR_RL("hdr already deleted by user\n");
return -EINVAL;
}
@@ -937,12 +956,12 @@ int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs)
int result = -EFAULT;
if (unlikely(!ipa_ctx)) {
- IPAERR("IPA driver was not initialized\n");
+ IPAERR_RL("IPA driver was not initialized\n");
return -EINVAL;
}
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -951,7 +970,7 @@ int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs)
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -992,14 +1011,14 @@ int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1048,13 +1067,13 @@ int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
- IPAERR("Processing context not supported on IPA HW %d\n",
+ IPAERR_RL("Processing context not supported on IPA HW %d\n",
ipa_ctx->ipa_hw_type);
return -EFAULT;
}
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1063,7 +1082,7 @@ int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -1108,14 +1127,14 @@ int ipa2_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1352,7 +1371,7 @@ int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup)
}
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
@@ -1439,13 +1458,13 @@ int ipa2_put_hdr(u32 hdr_hdl)
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1473,7 +1492,7 @@ int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy)
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index bb11230c960a..bfb1ce56412c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -37,7 +37,15 @@
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
+
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
+
#define MTU_BYTE 1500
#define IPA_MAX_NUM_PIPES 0x14
@@ -87,6 +95,18 @@
} \
} while (0)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__, \
+ __LINE__, ## args);\
+ if (ipa_ctx) { \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
@@ -223,8 +243,8 @@ struct ipa_smmu_cb_ctx {
*/
struct ipa_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa_flt_tbl *tbl;
struct ipa_rt_tbl *rt_tbl;
u32 hw_len;
@@ -249,13 +269,13 @@ struct ipa_flt_entry {
*/
struct ipa_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa_rt_tbl_set *set;
- u32 cookie;
bool in_sys;
u32 sz;
struct ipa_mem_buffer curr_mem;
@@ -286,6 +306,7 @@ struct ipa_rt_tbl {
*/
struct ipa_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -295,7 +316,6 @@ struct ipa_hdr_entry {
dma_addr_t phys_base;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -368,10 +388,10 @@ struct ipa_hdr_proc_ctx_add_hdr_cmd_seq {
*/
struct ipa_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
struct ipa_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -427,8 +447,8 @@ struct ipa_flt_tbl {
*/
struct ipa_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa_rt_tbl *tbl;
struct ipa_hdr_entry *hdr;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 5c07bc7d43b5..9c4fc0ce8cc1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -274,7 +274,7 @@ int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
if (!strncmp(entry->name, tx->name, IPA_RESOURCE_NAME_MAX)) {
/* add the entry check */
if (entry->num_tx_props != tx->num_tx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_tx_props,
tx->num_tx_props);
mutex_unlock(&ipa_ctx->lock);
@@ -315,7 +315,7 @@ int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
if (!strncmp(entry->name, rx->name, IPA_RESOURCE_NAME_MAX)) {
/* add the entry check */
if (entry->num_rx_props != rx->num_rx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_rx_props,
rx->num_rx_props);
mutex_unlock(&ipa_ctx->lock);
@@ -356,7 +356,7 @@ int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
if (!strcmp(entry->name, ext->name)) {
/* add the entry check */
if (entry->num_ext_props != ext->num_ext_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_ext_props,
ext->num_ext_props);
mutex_unlock(&ipa_ctx->lock);
@@ -405,13 +405,13 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -541,15 +541,15 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
char __user *start;
struct ipa_push_msg *msg = NULL;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int locked;
start = buf;
+ add_wait_queue(&ipa_ctx->msg_waitq, &wait);
while (1) {
mutex_lock(&ipa_ctx->msg_lock);
locked = 1;
- prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
if (!list_empty(&ipa_ctx->msg_list)) {
msg = list_first_entry(&ipa_ctx->msg_list,
struct ipa_push_msg, link);
@@ -601,10 +601,10 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
locked = 0;
mutex_unlock(&ipa_ctx->msg_lock);
- schedule();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
- finish_wait(&ipa_ctx->msg_waitq, &wait);
+ remove_wait_queue(&ipa_ctx->msg_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
@@ -634,7 +634,7 @@ int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index 5dd8b225217d..e7092e9acbc7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -252,8 +252,8 @@ int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -272,7 +272,7 @@ int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -335,17 +335,17 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->ipv4_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ (UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) {
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Table Entry offset is not
beyond allocated size */
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -354,16 +354,16 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->expn_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table Entry offset is not
beyond allocated size */
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -372,16 +372,16 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Indx Table Entry offset is not
beyond allocated size */
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -389,17 +389,17 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_expn_offset >
- UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ (UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table entry offset is not
beyond allocated size */
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -444,16 +444,16 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto free_mem;
@@ -509,7 +509,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
desc[1].len = size;
IPADBG("posting v4 init command\n");
if (ipa_send_cmd(2, desc)) {
- IPAERR("Fail to send immediate command\n");
+ IPAERR_RL("Fail to send immediate command\n");
result = -EPERM;
goto free_mem;
}
@@ -574,7 +574,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -582,7 +582,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -593,7 +593,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -605,7 +605,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -617,7 +617,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -629,7 +629,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -638,7 +638,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index f2909110d09f..011ca300cc09 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -853,7 +853,7 @@ int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
struct ipa_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -909,7 +909,7 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys = (ip == IPA_IP_v4) ?
!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
set->tbl_cnt++;
@@ -922,12 +922,16 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
@@ -940,13 +944,13 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
enum ipa_ip_type ip = IPA_IP_MAX;
u32 id;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -954,8 +958,11 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
+
if (!entry->in_sys) {
list_del(&entry->link);
@@ -994,13 +1001,14 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
if (rule->hdr_hdl) {
hdr = ipa_id_find(rule->hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rule->hdr_proc_ctx_hdl) {
proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
goto error;
}
@@ -1008,7 +1016,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
IPAERR("bad params\n");
goto error;
}
@@ -1029,7 +1037,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
goto error;
}
INIT_LIST_HEAD(&entry->link);
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_RULE_COOKIE;
entry->rule = *rule;
entry->tbl = tbl;
entry->hdr = hdr;
@@ -1082,7 +1090,7 @@ int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1092,7 +1100,7 @@ int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1119,12 +1127,12 @@ int __ipa_del_rt_rule(u32 rule_hdl)
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1138,7 +1146,7 @@ int __ipa_del_rt_rule(u32 rule_hdl)
entry->tbl->rule_cnt);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1165,14 +1173,14 @@ int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1205,7 +1213,7 @@ int ipa2_commit_rt(enum ipa_ip_type ip)
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1249,7 +1257,7 @@ int ipa2_reset_rt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1267,7 +1275,7 @@ int ipa2_reset_rt(enum ipa_ip_type ip)
* filtering rules point to routing tables
*/
if (ipa2_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa_ctx->rt_tbl_set[ip];
rset = &ipa_ctx->reap_rt_tbl_set[ip];
@@ -1353,14 +1361,14 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
if (entry->ref_cnt == U32_MAX) {
- IPAERR("fail: ref count crossed limit\n");
+ IPAERR_RL("fail: ref count crossed limit\n");
goto ret;
}
entry->ref_cnt++;
@@ -1368,7 +1376,7 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
/* commit for get */
if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
@@ -1396,13 +1404,13 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
mutex_lock(&ipa_ctx->lock);
entry = ipa_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1411,16 +1419,19 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ result = -EINVAL;
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa_ctx->ctrl->ipa_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1439,20 +1450,20 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
if (rtrule->rule.hdr_hdl) {
hdr = ipa_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
}
entry = ipa_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1485,14 +1496,14 @@ int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 56b4bf1a2d1e..23f802425cf0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1672,7 +1672,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1685,7 +1685,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index e0200fe50871..50a8e46d3b12 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -918,7 +918,7 @@ int ipa2_get_ep_mapping(enum ipa_client_type client)
}
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return INVALID_EP_MAPPING_INDEX;
}
@@ -1769,7 +1769,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
IPA_FLT_FLOW_LABEL) {
- IPAERR("v6 attrib's specified for v4 rule\n");
+ IPAERR_RL("v6 attrib's specified for v4 rule\n");
return -EPERM;
}
@@ -1781,7 +1781,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1801,7 +1801,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1815,7 +1815,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1829,11 +1829,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1847,11 +1847,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1865,7 +1865,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1878,7 +1878,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1891,7 +1891,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1905,7 +1905,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1919,7 +1919,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1946,7 +1946,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1961,7 +1961,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1976,7 +1976,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1991,7 +1991,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2006,7 +2006,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -2024,7 +2024,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
/* error check */
if (attrib->attrib_mask & IPA_FLT_TOS ||
attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- IPAERR("v4 attrib's specified for v6 rule\n");
+ IPAERR_RL("v4 attrib's specified for v6 rule\n");
return -EPERM;
}
@@ -2036,7 +2036,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2049,7 +2049,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2062,7 +2062,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2076,7 +2076,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2090,7 +2090,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2104,11 +2104,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2122,11 +2122,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2140,7 +2140,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2166,7 +2166,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2198,7 +2198,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2243,7 +2243,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2258,7 +2258,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2273,7 +2273,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2288,7 +2288,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2303,7 +2303,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -2316,7 +2316,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
}
} else {
- IPAERR("unsupported ip %d\n", ip);
+ IPAERR_RL("unsupported ip %d\n", ip);
return -EPERM;
}
@@ -2326,7 +2326,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
*/
if (attrib->attrib_mask == 0) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -3617,19 +3617,19 @@ int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
}
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -3642,7 +3642,7 @@ int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index a50cd0b807a2..62e263a139cc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -603,7 +603,7 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
if (retval) {
- IPAERR("ipa3_send_msg failed: %d\n", retval);
+ IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
kfree(wan_msg);
return retval;
}
@@ -695,7 +695,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -742,7 +742,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -781,7 +781,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -821,7 +821,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -861,7 +861,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules,
pre_entry);
@@ -903,7 +903,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -943,7 +943,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -982,7 +982,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1024,7 +1024,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules,
pre_entry);
@@ -1065,7 +1065,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -1105,7 +1105,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1243,7 +1243,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1288,7 +1288,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1333,7 +1333,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1371,7 +1371,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1511,7 +1511,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1550,7 +1550,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
@@ -1654,6 +1654,21 @@ int ipa3_setup_dflt_rt_tables(void)
return 0;
}
+static int ipa3_clkon_cfg_wa(void)
+{
+ struct ipahal_reg_clkon_cfg clkon_cfg = { 0 };
+ int ret = 0;
+
+ clkon_cfg.cgc_open_misc = 1;
+
+ if (ipa3_cfg_clkon_cfg(&clkon_cfg)) {
+ IPAERR("fail to set cgc_open_misc = 1\n");
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
static int ipa3_setup_exception_path(void)
{
struct ipa_ioc_add_hdr *hdr;
@@ -3962,6 +3977,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
/* Prevent consequent calls from trying to load the FW again. */
if (ipa3_ctx->ipa_initialization_complete)
return 0;
+ /* proxy vote for modem is taken here in ipa3_post_init() */
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
@@ -4063,8 +4080,14 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_trigger_ipa_ready_cbs();
complete_all(&ipa3_ctx->init_completion_obj);
- pr_info("IPA driver initialization was successful.\n");
+ /* WA to disable MISC clock gating for IPA_HW_v3_1 */
+ if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1) {
+ pr_info(" WA to set cgc_open_misc = 1\n");
+ ipa3_clkon_cfg_wa();
+ }
+
+ pr_info("IPA driver initialization was successful\n");
return 0;
fail_teth_bridge_driver_init:
@@ -4275,7 +4298,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
int i;
struct ipa3_flt_tbl *flt_tbl;
struct ipa3_rt_tbl_set *rset;
- struct ipa_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@@ -4465,8 +4487,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
- ipa3_active_clients_log_inc(&log_info, false);
+ /* proxy vote for modem moved to ipa3_post_init() */
ipa3_ctx->ipa3_active_clients.cnt = 1;
/* Assign resource limitation to each group */
@@ -4702,7 +4723,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
init_completion(&ipa3_ctx->init_completion_obj);
init_completion(&ipa3_ctx->uc_loaded_completion_obj);
-
/*
* For GSI, we can't register the GSI driver yet, as it expects
* the GSI FW to be up and running before the registration.
@@ -4745,6 +4765,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
IPADBG("ipa cdev added successful. major:%d minor:%d\n",
MAJOR(ipa3_ctx->dev_num),
MINOR(ipa3_ctx->dev_num));
+ /* the proxy vote for the modem is now taken in the ipa3_post_init() phase */
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
fail_cdev_add:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index bc6622d4725b..8c6bd48cfb2c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1254,6 +1254,12 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
memset(gsi_ep_cfg_ptr, 0, sizeof(struct ipa_gsi_ep_config));
gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx);
+ if (gsi_ep_cfg_ptr == NULL) {
+ IPAERR("Error ipa_get_gsi_ep_info ret NULL\n");
+ result = -EFAULT;
+ goto write_evt_scratch_fail;
+ }
+
params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
gsi_res = gsi_alloc_channel(&params->chan_params, gsi_dev_hdl,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 6a3a89e2cf8d..034b7fc45d8d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1188,6 +1188,15 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
} else {
inactive_cycles = 0;
}
+
+ /*
+ * if pipe is out of buffers there is no point polling for
+ * completed descs; release the worker so delayed work can
+ * run in a timely manner
+ */
+ if (sys->len == 0)
+ break;
+
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
trace_poll_to_intr3(sys->ep->client);
@@ -2084,6 +2093,8 @@ static void ipa3_cleanup_wlan_rx_common_cache(void)
struct ipa3_rx_pkt_wrapper *rx_pkt;
struct ipa3_rx_pkt_wrapper *tmp;
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
list_for_each_entry_safe(rx_pkt, tmp,
&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
list_del(&rx_pkt->link);
@@ -2104,6 +2115,8 @@ static void ipa3_cleanup_wlan_rx_common_cache(void)
IPAERR("wlan comm buff total cnt: %d\n",
ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
}
static void ipa3_alloc_wlan_rx_common_cache(u32 size)
@@ -2141,11 +2154,13 @@ static void ipa3_alloc_wlan_rx_common_cache(u32 size)
goto fail_dma_mapping;
}
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
list_add_tail(&rx_pkt->link,
&ipa3_ctx->wc_memb.wlan_comm_desc_list);
rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}
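/*
 * Illustrative sketch only (demo names, not driver code): the ipa_dp.c hunks
 * above take wc_memb.wlan_spinlock around both the cleanup walk and the
 * allocation path's list_add_tail(), so the WLAN common-buffer list is never
 * modified concurrently. The bare pattern, reduced to a standalone form:
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_pkt {
	struct list_head link;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

static void demo_cleanup(void)
{
	struct demo_pkt *pkt, *tmp;

	/* _bh variant because the list is also touched from bottom halves */
	spin_lock_bh(&demo_lock);
	list_for_each_entry_safe(pkt, tmp, &demo_list, link) {
		list_del(&pkt->link);
		kfree(pkt);
	}
	spin_unlock_bh(&demo_lock);
}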
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 41b29335d23b..c2fb87ab757b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -745,7 +745,7 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
goto error;
}
- if ((*rt_tbl)->cookie != IPA_COOKIE) {
+ if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
IPAERR("RT table cookie is invalid\n");
goto error;
}
@@ -787,7 +787,7 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
}
INIT_LIST_HEAD(&((*entry)->link));
(*entry)->rule = *rule;
- (*entry)->cookie = IPA_COOKIE;
+ (*entry)->cookie = IPA_FLT_COOKIE;
(*entry)->rt_tbl = rt_tbl;
(*entry)->tbl = tbl;
if (rule->rule_id) {
@@ -822,12 +822,18 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
+ipa_insert_failed:
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ tbl->rule_cnt--;
+ return -EPERM;
}
static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
@@ -853,9 +859,16 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
list_add(&entry->link, &tbl->head_flt_rule_list);
}
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
error:
return -EPERM;
@@ -874,7 +887,7 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
goto error;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule,
rule_hdl);
goto error;
}
@@ -887,7 +900,8 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
list_add(&entry->link, &((*add_after_entry)->link));
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
/*
* prepare for next insertion
@@ -896,6 +910,13 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
error:
*add_after_entry = NULL;
return -EPERM;
@@ -908,12 +929,12 @@ static int __ipa_del_flt_rule(u32 rule_hdl)
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -945,12 +966,12 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
entry = ipa3_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -960,25 +981,25 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1023,7 +1044,7 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
@@ -1053,7 +1074,7 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1068,7 +1089,7 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
result = -1;
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1076,7 +1097,7 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
}
if (rules->global) {
- IPAERR("no support for global filter rules\n");
+ IPAERR_RL("no support for global filter rules\n");
result = -EPERM;
goto bail;
}
@@ -1111,12 +1132,12 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
if (rules->ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms ep=%d\n", rules->ep);
+ IPAERR_RL("bad parms ep=%d\n", rules->ep);
return -EINVAL;
}
@@ -1131,20 +1152,20 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
entry = ipa3_id_find(rules->add_after_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given entry does not match the table\n");
+ IPAERR_RL("given entry does not match the table\n");
result = -EINVAL;
goto bail;
}
if (tbl->sticky_rear)
if (&entry->link == tbl->head_flt_rule_list.prev) {
- IPAERR("cannot add rule at end of a sticky table");
+ IPAERR_RL("cannot add rule at end of a sticky table");
result = -EINVAL;
goto bail;
}
@@ -1166,7 +1187,7 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
&entry);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1200,14 +1221,14 @@ int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del flt rule %i\n", i);
+ IPAERR_RL("failed to del flt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1240,14 +1261,14 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy flt rule %i\n", i);
+ IPAERR_RL("failed to mdfy flt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1281,7 +1302,7 @@ int ipa3_commit_flt(enum ipa_ip_type ip)
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1317,7 +1338,7 @@ int ipa3_reset_flt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
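/*
 * Sketch only (hypothetical types and helpers, not the driver's API): the
 * ipa_insert_failed paths added above unwind each side effect in reverse
 * order - drop the table's rule count, unlink the entry, release any id taken
 * from the idr, and free the cache object - so a failed insert leaves no
 * stale state behind. A reduced illustration of that rollback shape:
 */
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_rule {
	struct list_head link;
	int id;
};

struct demo_tbl {
	struct list_head rules;
	struct idr ids;
	u32 rule_cnt;
};

static int demo_add_rule(struct demo_tbl *tbl)
{
	struct demo_rule *rule;
	int id;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	list_add(&rule->link, &tbl->rules);
	tbl->rule_cnt++;

	id = idr_alloc(&tbl->ids, rule, 1, 0, GFP_KERNEL);
	if (id < 0)
		goto rollback;

	rule->id = id;
	return 0;

rollback:
	/* undo in reverse order of the steps above */
	tbl->rule_cnt--;
	list_del(&rule->link);
	kfree(rule);
	return -EPERM;
}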
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index a5186f1aff35..7c3b5838242e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -330,17 +330,17 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
if (!hdr_entry) {
- IPAERR("hdr_hdl is invalid\n");
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
- if (hdr_entry->cookie != IPA_COOKIE) {
- IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+ if (hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
WARN_ON(1);
return -EINVAL;
}
@@ -359,7 +359,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
entry->hdr = hdr_entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_PROC_HDR_COOKIE;
needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
@@ -369,7 +369,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -379,7 +379,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -417,6 +417,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -424,6 +425,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
+ipa_insert_failed:
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -436,19 +445,19 @@ bad_len:
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa3_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
int id;
int mem_size;
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -467,7 +476,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -480,7 +489,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -546,6 +555,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -570,10 +580,19 @@ fail_add_proc_ctx:
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa3_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
@@ -591,8 +610,8 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
entry = ipa3_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -600,7 +619,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -638,12 +657,12 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -656,7 +675,7 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -705,7 +724,7 @@ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
int result = -EFAULT;
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -714,7 +733,7 @@ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -750,14 +769,14 @@ int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
int result = -EFAULT;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -805,7 +824,7 @@ int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
int result = -EFAULT;
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -814,7 +833,7 @@ int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -852,14 +871,14 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
int result;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1066,7 +1085,7 @@ static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
struct ipa3_hdr_entry *entry;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Header name too long: %s\n", name);
+ IPAERR_RL("Header name too long: %s\n", name);
return NULL;
}
@@ -1096,7 +1115,7 @@ int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
int result = -1;
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
@@ -1183,13 +1202,13 @@ int ipa3_put_hdr(u32 hdr_hdl)
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1217,7 +1236,7 @@ int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index ac7ef6a21952..4278dc45aad2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -40,6 +40,12 @@
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
#define MTU_BYTE 1500
#define IPA_EP_NOT_ALLOCATED (-1)
@@ -89,6 +95,18 @@
} \
} while (0)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args);\
+ if (ipa3_ctx) { \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
@@ -217,8 +235,8 @@ struct ipa_smmu_cb_ctx {
*/
struct ipa3_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa3_flt_tbl *tbl;
struct ipa3_rt_tbl *rt_tbl;
u32 hw_len;
@@ -246,13 +264,13 @@ struct ipa3_flt_entry {
*/
struct ipa3_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa3_rt_tbl_set *set;
- u32 cookie;
bool in_sys[IPA_RULE_TYPE_MAX];
u32 sz[IPA_RULE_TYPE_MAX];
struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
@@ -284,6 +302,7 @@ struct ipa3_rt_tbl {
*/
struct ipa3_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -293,7 +312,6 @@ struct ipa3_hdr_entry {
dma_addr_t phys_base;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -342,10 +360,10 @@ struct ipa3_hdr_proc_ctx_offset_entry {
*/
struct ipa3_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa3_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -407,8 +425,8 @@ struct ipa3_flt_tbl {
*/
struct ipa3_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa3_rt_tbl *tbl;
struct ipa3_hdr_entry *hdr;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
@@ -1859,6 +1877,7 @@ void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
#else
#define IPA_DUMP_BUFF(base, phy_base, size)
#endif
+int ipa3_cfg_clkon_cfg(struct ipahal_reg_clkon_cfg *clkon_cfg);
int ipa3_init_mem_partition(struct device_node *dev_node);
int ipa3_controller_static_bind(struct ipa3_controller *controller,
enum ipa_hw_type ipa_hw_type);
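/*
 * Sketch only (demo names, not driver code): with a distinct cookie per
 * object type, as defined above, and the cookie field placed right after the
 * list link in each struct, a handle that resolves to the wrong object type
 * fails its sanity check instead of being silently accepted:
 */
#include <linux/list.h>
#include <linux/types.h>

#define DEMO_HDR_COOKIE		0x57831607	/* same value as IPA_HDR_COOKIE */
#define DEMO_PROC_HDR_COOKIE	0x57831608	/* same value as IPA_PROC_HDR_COOKIE */

struct demo_entry {
	struct list_head link;
	u32 cookie;		/* kept right after link, as in the reordered structs */
};

static bool demo_is_hdr_entry(const struct demo_entry *e)
{
	/* only an entry created by the header-add path carries this cookie */
	return e && e->cookie == DEMO_HDR_COOKIE;
}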
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 16a567644f79..76f37162f495 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -227,7 +227,7 @@ int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", lookup->name);
+ IPAERR_RL("Interface name too long. (%s)\n", lookup->name);
return result;
}
@@ -268,7 +268,7 @@ int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
}
if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", tx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", tx->name);
return result;
}
@@ -277,7 +277,7 @@ int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
if (!strcmp(entry->name, tx->name)) {
/* add the entry check */
if (entry->num_tx_props != tx->num_tx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_tx_props,
tx->num_tx_props);
mutex_unlock(&ipa3_ctx->lock);
@@ -315,7 +315,7 @@ int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
}
if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", rx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", rx->name);
return result;
}
@@ -324,7 +324,7 @@ int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
if (!strcmp(entry->name, rx->name)) {
/* add the entry check */
if (entry->num_rx_props != rx->num_rx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_rx_props,
rx->num_rx_props);
mutex_unlock(&ipa3_ctx->lock);
@@ -366,7 +366,7 @@ int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
if (!strcmp(entry->name, ext->name)) {
/* add the entry check */
if (entry->num_ext_props != ext->num_ext_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_ext_props,
ext->num_ext_props);
mutex_unlock(&ipa3_ctx->lock);
@@ -410,13 +410,13 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -546,17 +546,15 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
char __user *start;
struct ipa3_push_msg *msg = NULL;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int locked;
start = buf;
+ add_wait_queue(&ipa3_ctx->msg_waitq, &wait);
while (1) {
mutex_lock(&ipa3_ctx->msg_lock);
locked = 1;
- prepare_to_wait(&ipa3_ctx->msg_waitq,
- &wait,
- TASK_INTERRUPTIBLE);
if (!list_empty(&ipa3_ctx->msg_list)) {
msg = list_first_entry(&ipa3_ctx->msg_list,
@@ -609,10 +607,10 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
locked = 0;
mutex_unlock(&ipa3_ctx->msg_lock);
- schedule();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
- finish_wait(&ipa3_ctx->msg_waitq, &wait);
+ remove_wait_queue(&ipa3_ctx->msg_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
@@ -642,7 +640,7 @@ int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
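/*
 * Sketch only (demo names): the ipa3_read() change above moves from
 * prepare_to_wait()/schedule() to the wait_woken() idiom - the waiter is
 * added to the queue once, outside the loop, and wait_woken() handles the
 * wakeup/sleep ordering so a wakeup between the emptiness check and the
 * sleep is not lost. In isolation the pattern looks like:
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static bool demo_data_ready;

static int demo_wait_for_data(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	add_wait_queue(&demo_waitq, &wait);
	while (!demo_data_ready) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* sleeps until the queue is woken or the timeout expires */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&demo_waitq, &wait);

	return ret;
}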
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index cd027c28e597..3267e0e83a82 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -191,7 +191,7 @@ static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
int ipa_ep_idx, struct start_gsi_channel *params)
{
- int res;
+ int res = 0;
struct gsi_evt_ring_props ev_props;
struct ipa_mhi_msi_info *msi;
struct gsi_chan_props ch_props;
@@ -241,7 +241,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
if (res) {
IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
goto fail_alloc_evt;
- return res;
}
IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
client,
@@ -259,7 +258,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n",
params->ev_ctx_host->wp);
goto fail_alloc_ch;
- return res;
}
IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n",
@@ -270,7 +268,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n",
res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
goto fail_alloc_ch;
- return res;
}
memset(&ch_props, 0, sizeof(ch_props));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index e7e5cf114242..0256ff89ae24 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -254,8 +254,8 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -274,7 +274,7 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -337,16 +337,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->ipv4_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Table Entry offset is not
beyond allocated size */
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -354,17 +354,17 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->expn_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ (UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table Entry offset is not
beyond allocated size */
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -373,16 +373,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Indx Table Entry offset is not
beyond allocated size */
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -391,16 +391,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_expn_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table entry offset is not
beyond allocated size */
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -437,16 +437,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa3_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto free_nop;
@@ -496,7 +496,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto free_nop;
}
@@ -576,7 +576,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -584,7 +584,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -595,7 +595,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -607,7 +607,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -619,7 +619,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -631,7 +631,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -640,7 +640,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
@@ -679,7 +679,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_NAT_DMA, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct nat_dma imm cmd\n");
+ IPAERR_RL("Fail to construct nat_dma imm cmd\n");
continue;
}
desc[1].type = IPA_IMM_CMD_DESC;
@@ -796,7 +796,7 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto destroy_regwrt_imm_cmd;
}
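/*
 * Sketch only (hypothetical helper): the ipa3_nat_init_cmd() checks above
 * first make sure "offset + span" cannot wrap a u32 and only then compare it
 * against the allocated NAT memory, where span is entry_size * entries and is
 * assumed to fit in a u32, as the driver's callers arrange:
 */
#include <linux/errno.h>
#include <linux/kernel.h>

static int demo_check_nat_range(u32 offset, u32 span, size_t mem_size)
{
	/* offset + span would overflow 32-bit arithmetic */
	if (offset > UINT_MAX - span)
		return -EPERM;

	/* the range ends beyond the allocated NAT memory */
	if ((size_t)offset + span > mem_size)
		return -EPERM;

	return 0;
}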
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 0269bfba993f..690a9db67ff0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1177,10 +1177,13 @@ void ipa3_qmi_service_exit(void)
}
/* clean the QMI msg cache */
+ mutex_lock(&ipa3_qmi_lock);
if (ipa3_qmi_ctx != NULL) {
vfree(ipa3_qmi_ctx);
ipa3_qmi_ctx = NULL;
}
+ mutex_unlock(&ipa3_qmi_lock);
+
ipa3_svc_handle = 0;
ipa3_qmi_modem_init_fin = false;
ipa3_qmi_indication_fin = false;
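/*
 * Sketch only (hypothetical names): the hunk above serializes freeing and
 * NULL-ing the QMI context under ipa3_qmi_lock, so a concurrent user either
 * sees the valid pointer or NULL, never a just-freed one. The same pattern in
 * isolation:
 */
#include <linux/mutex.h>
#include <linux/vmalloc.h>

static DEFINE_MUTEX(demo_lock);
static void *demo_ctx;

static void demo_ctx_destroy(void)
{
	mutex_lock(&demo_lock);
	if (demo_ctx) {
		vfree(demo_ctx);
		demo_ctx = NULL;	/* readers under demo_lock now see NULL */
	}
	mutex_unlock(&demo_lock);
}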
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 6197c9f64ca5..bc7cc7060545 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -697,7 +697,7 @@ struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
struct ipa3_rt_tbl_set *set;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Name too long: %s\n", name);
+ IPAERR_RL("Name too long: %s\n", name);
return NULL;
}
@@ -723,7 +723,7 @@ int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
struct ipa3_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -749,7 +749,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
int max_tbl_indx;
if (name == NULL) {
- IPAERR("no tbl name\n");
+ IPAERR_RL("no tbl name\n");
goto error;
}
@@ -762,7 +762,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
max(IPA_MEM_PART(v6_modem_rt_index_hi),
IPA_MEM_PART(v6_apps_rt_index_hi));
} else {
- IPAERR("bad ip family type\n");
+ IPAERR_RL("bad ip family type\n");
goto error;
}
@@ -796,7 +796,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
!ipa3_ctx->ip4_rt_tbl_hash_lcl :
!ipa3_ctx->ip6_rt_tbl_hash_lcl;
@@ -814,12 +814,16 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
-
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
+ idr_destroy(&entry->rule_ids);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
@@ -833,13 +837,13 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
u32 id;
struct ipa3_rt_tbl_set *rset;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa3_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -847,8 +851,10 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -885,14 +891,14 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
if (rule->hdr_hdl) {
*hdr = ipa3_id_find(rule->hdr_hdl);
- if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) {
+ if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
return -EPERM;
}
} else if (rule->hdr_proc_ctx_hdl) {
*proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
if ((*proc_ctx == NULL) ||
- ((*proc_ctx)->cookie != IPA_COOKIE)) {
+ ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
return -EPERM;
@@ -915,7 +921,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
goto error;
}
INIT_LIST_HEAD(&(*entry)->link);
- (*(entry))->cookie = IPA_COOKIE;
+ (*(entry))->cookie = IPA_RT_RULE_COOKIE;
(*(entry))->rule = *rule;
(*(entry))->tbl = tbl;
(*(entry))->hdr = hdr;
@@ -983,8 +989,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed adding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed adding rt tbl name = %s\n",
name ? name : "");
goto error;
}
@@ -994,8 +1000,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(tbl->rule_cnt > 0) && (at_rear != 0)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
- tbl->rule_cnt, at_rear);
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d"
+ , tbl->rule_cnt, at_rear);
goto error;
}
@@ -1065,7 +1071,7 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1075,7 +1081,7 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1111,36 +1117,36 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
struct ipa3_rt_entry *entry = NULL;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed finding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed finding rt tbl name = %s\n",
rules->rt_tbl_name ? rules->rt_tbl_name : "");
ret = -EINVAL;
goto bail;
}
if (tbl->rule_cnt <= 0) {
- IPAERR("tbl->rule_cnt <= 0");
+ IPAERR_RL("tbl->rule_cnt <= 0");
ret = -EINVAL;
goto bail;
}
entry = ipa3_id_find(rules->add_after_hdl);
if (!entry) {
- IPAERR("failed finding rule %d in rt tbls\n",
+ IPAERR_RL("failed finding rule %d in rt tbls\n",
rules->add_after_hdl);
ret = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given rt rule does not match the table\n");
+ IPAERR_RL("given rt rule does not match the table\n");
ret = -EINVAL;
goto bail;
}
@@ -1151,7 +1157,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(&entry->link == tbl->head_rt_rule_list.prev)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n",
tbl->rule_cnt);
ret = -EINVAL;
goto bail;
@@ -1168,7 +1174,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
&rules->rules[i].rule,
&rules->rules[i].rt_rule_hdl,
&entry)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1177,7 +1183,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
if (rules->commit)
if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
- IPAERR("failed to commit\n");
+ IPAERR_RL("failed to commit\n");
ret = -EPERM;
goto bail;
}
@@ -1198,12 +1204,12 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1219,7 +1225,7 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1246,14 +1252,14 @@ int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1286,7 +1292,7 @@ int ipa3_commit_rt(enum ipa_ip_type ip)
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1330,7 +1336,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1346,7 +1352,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
* filtering rules point to routing tables
*/
if (ipa3_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa3_ctx->rt_tbl_set[ip];
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -1435,14 +1441,14 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
if (entry->ref_cnt == U32_MAX) {
- IPAERR("fail: ref count crossed limit\n");
+ IPAERR_RL("fail: ref count crossed limit\n");
goto ret;
}
entry->ref_cnt++;
@@ -1450,7 +1456,7 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
/* commit for get */
if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
@@ -1478,13 +1484,13 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
mutex_lock(&ipa3_ctx->lock);
entry = ipa3_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1493,18 +1499,20 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
entry->idx);
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1524,26 +1532,27 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
if (rtrule->rule.hdr_hdl) {
hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rtrule->rule.hdr_proc_ctx_hdl) {
proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid proc ctx\n");
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid proc ctx\n");
goto error;
}
}
entry = ipa3_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1584,14 +1593,14 @@ int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index e49bdc8c7083..d3837a05fdc2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1664,7 +1664,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1677,7 +1677,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6647f919a577..e8bd0cd2ffb5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -795,6 +795,28 @@ void _ipa_sram_settings_read_v3_0(void)
}
/**
+ * ipa3_cfg_clkon_cfg() - configure IPA clkon_cfg
+ * @clkon_cfg: IPA clkon_cfg
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_cfg_clkon_cfg(struct ipahal_reg_clkon_cfg *clkon_cfg)
+{
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ IPADBG("cgc_open_misc = %d\n",
+ clkon_cfg->cgc_open_misc);
+
+ ipahal_write_reg_fields(IPA_CLKON_CFG, clkon_cfg);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
* ipa3_cfg_route() - configure IPA route
* @route: IPA route
*
@@ -838,7 +860,7 @@ int ipa3_cfg_route(struct ipahal_reg_route *route)
*/
int ipa3_cfg_filter(u32 disable)
{
- IPAERR("Filter disable is not supported!\n");
+ IPAERR_RL("Filter disable is not supported!\n");
return -EPERM;
}
@@ -928,7 +950,7 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
int ipa_ep_idx;
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return IPA_EP_NOT_ALLOCATED;
}
@@ -2001,19 +2023,19 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
int result = -EINVAL;
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -2026,7 +2048,7 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 585f9e6bd492..0db9f30181a7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -19,6 +19,7 @@
#include "ipahal_reg_i.h"
static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+ __stringify(IPA_CLKON_CFG),
__stringify(IPA_ROUTE),
__stringify(IPA_IRQ_STTS_EE_n),
__stringify(IPA_IRQ_EN_EE_n),
@@ -858,6 +859,18 @@ static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
}
+static void ipareg_construct_clkon_cfg(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_clkon_cfg *clkon_cfg;
+
+ clkon_cfg = (struct ipahal_reg_clkon_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clkon_cfg->cgc_open_misc,
+ IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT,
+ IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK);
+}
+
static void ipareg_construct_route(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
@@ -1159,6 +1172,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
/* IPAv3.1 */
+ [IPA_HW_v3_1][IPA_CLKON_CFG] = {
+ ipareg_construct_clkon_cfg, ipareg_parse_dummy,
+ 0x00000044, 0},
[IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = {
ipareg_construct_dummy, ipareg_parse_dummy,
0x00003030, 0x1000},
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index c37415f13380..4db09475d5e2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -22,6 +22,7 @@
* array as well.
*/
enum ipahal_reg_name {
+ IPA_CLKON_CFG,
IPA_ROUTE,
IPA_IRQ_STTS_EE_n,
IPA_IRQ_EN_EE_n,
@@ -90,6 +91,14 @@ enum ipahal_reg_name {
};
/*
+ * struct ipahal_reg_clkon_cfg - IPA clock on configuration register
+ * @cgc_open_misc: when set, disable (keep open) clock gating for the MISC block
+ */
+struct ipahal_reg_clkon_cfg {
+ u32 cgc_open_misc;
+};
+
+/*
* struct ipahal_reg_route - IPA route register
* @route_dis: route disable
* @route_def_pipe: route default pipe
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index ac97e5ac0494..37458c4d0697 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -21,6 +21,9 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
(((reg) & (mask)) >> (shift))
+/* IPA_CLKON_CFG register */
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT 0x3
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK 0x8
/* IPA_ROUTE register */
#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
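/*
 * Sketch (generic helper, not the HAL's macro): a SHFT/BMSK pair such as
 * IPA_CLKON_CFG_CGC_OPEN_MISC_{SHFT,BMSK} above is used by placing the field
 * value behind the shift and masking it into the register word:
 */
#include <linux/types.h>

static inline u32 demo_set_field(u32 reg, u32 val, u32 shift, u32 mask)
{
	/* clear the field, then insert @val */
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * demo_set_field(0, 1, IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT,
 *		  IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK) == 0x8
 */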
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index 4563810d7e5b..5b50666d30a2 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -326,73 +326,6 @@ static int mhi_init_inbound(struct uci_client *client_handle)
return ret_val;
}
-static int mhi_uci_send_packet(struct mhi_client_handle **client_handle,
- void *buf,
- u32 size)
-{
- u32 nr_avail_trbs = 0;
- u32 i = 0;
- void *data_loc = NULL;
- unsigned long memcpy_result = 0;
- int data_left_to_insert = 0;
- size_t data_to_insert_now = 0;
- u32 data_inserted_so_far = 0;
- int ret_val = 0;
- struct uci_client *uci_handle;
- struct uci_buf *uci_buf;
-
- uci_handle = container_of(client_handle, struct uci_client,
- out_attr.mhi_handle);
-
- nr_avail_trbs = atomic_read(&uci_handle->out_attr.avail_pkts);
- data_left_to_insert = size;
-
- for (i = 0; i < nr_avail_trbs; ++i) {
- data_to_insert_now = min_t(size_t, data_left_to_insert,
- uci_handle->out_attr.max_packet_size);
- data_loc = kmalloc(data_to_insert_now + sizeof(*uci_buf),
- GFP_KERNEL);
- if (!data_loc) {
- uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
- "Failed to allocate memory 0x%zx\n",
- data_to_insert_now);
- return -ENOMEM;
- }
- uci_buf = data_loc + data_to_insert_now;
- uci_buf->data = data_loc;
- uci_buf->pkt_id = uci_handle->out_attr.pkt_count++;
- memcpy_result = copy_from_user(uci_buf->data,
- buf + data_inserted_so_far,
- data_to_insert_now);
- if (memcpy_result)
- goto error_xfer;
-
- uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
- "At trb i = %d/%d, size = %lu, id %llu chan %d\n",
- i, nr_avail_trbs, data_to_insert_now, uci_buf->pkt_id,
- uci_handle->out_attr.chan_id);
- ret_val = mhi_queue_xfer(*client_handle, uci_buf->data,
- data_to_insert_now, MHI_EOT);
- if (ret_val) {
- goto error_xfer;
- } else {
- data_left_to_insert -= data_to_insert_now;
- data_inserted_so_far += data_to_insert_now;
- atomic_inc(&uci_handle->out_pkt_pend_ack);
- atomic_dec(&uci_handle->out_attr.avail_pkts);
- list_add_tail(&uci_buf->node,
- &uci_handle->out_attr.buf_head);
- }
- if (!data_left_to_insert)
- break;
- }
- return data_inserted_so_far;
-
-error_xfer:
- kfree(uci_buf->data);
- return data_inserted_so_far;
-}
-
static int mhi_uci_send_status_cmd(struct uci_client *client)
{
void *buf = NULL;
@@ -963,18 +896,11 @@ static ssize_t mhi_uci_client_write(struct file *file,
goto sys_interrupt;
}
- while (bytes_transferrd != count) {
- ret_val = mhi_uci_send_packet(&chan_attr->mhi_handle,
- (void *)buf, count);
- if (ret_val < 0)
- goto sys_interrupt;
+ while (count) {
+ size_t xfer_size;
+ void *data_loc = NULL;
+ struct uci_buf *uci_buf;
- bytes_transferrd += ret_val;
- if (bytes_transferrd == count)
- break;
- uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
- "No descriptors available, did we poll, chan %d?\n",
- chan);
mutex_unlock(&chan_attr->chan_lock);
ret_val = wait_event_interruptible(chan_attr->wq,
(atomic_read(&chan_attr->avail_pkts) ||
@@ -991,6 +917,37 @@ static ssize_t mhi_uci_client_write(struct file *file,
ret_val = -ERESTARTSYS;
goto sys_interrupt;
}
+
+ xfer_size = min_t(size_t, count, chan_attr->max_packet_size);
+ data_loc = kmalloc(xfer_size + sizeof(*uci_buf), GFP_KERNEL);
+ if (!data_loc) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Failed to allocate memory %lu\n", xfer_size);
+ ret_val = -ENOMEM;
+ goto sys_interrupt;
+ }
+
+ uci_buf = data_loc + xfer_size;
+ uci_buf->data = data_loc;
+ uci_buf->pkt_id = uci_handle->out_attr.pkt_count++;
+ ret_val = copy_from_user(uci_buf->data, buf, xfer_size);
+ if (unlikely(ret_val)) {
+ kfree(uci_buf->data);
+ goto sys_interrupt;
+ }
+ ret_val = mhi_queue_xfer(chan_attr->mhi_handle, uci_buf->data,
+ xfer_size, MHI_EOT);
+ if (unlikely(ret_val)) {
+ kfree(uci_buf->data);
+ goto sys_interrupt;
+ }
+
+ bytes_transferrd += xfer_size;
+ count -= xfer_size;
+ buf += xfer_size;
+ atomic_inc(&uci_handle->out_pkt_pend_ack);
+ atomic_dec(&uci_handle->out_attr.avail_pkts);
+ list_add_tail(&uci_buf->node, &uci_handle->out_attr.buf_head);
}
mutex_unlock(&chan_attr->chan_lock);
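The rewritten write path above folds the payload and its bookkeeping struct uci_buf into a single kmalloc(), placing the struct at the tail of the payload so one kfree(uci_buf->data) releases both. A stripped-down sketch of that allocation pattern (struct members mirror the ones used above; the names here are illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct uci_buf_example {
	void *data;			/* points back at the start of the allocation */
	u64 pkt_id;
	struct list_head node;
};

static struct uci_buf_example *uci_alloc_pkt(size_t payload_len)
{
	/* one allocation: [ payload_len bytes | struct uci_buf_example ] */
	void *data_loc = kmalloc(payload_len + sizeof(struct uci_buf_example),
				 GFP_KERNEL);
	struct uci_buf_example *ub;

	if (!data_loc)
		return NULL;
	ub = data_loc + payload_len;	/* metadata lives in the tail */
	ub->data = data_loc;		/* kfree(ub->data) frees payload + metadata */
	return ub;
}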
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f4edd4..09cc64b3b695 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -807,6 +807,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 11:
case 7:
case 6:
+ case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c
index 43ad33ea234b..825c27e7a4c1 100644
--- a/drivers/power/qcom/msm-core.c
+++ b/drivers/power/qcom/msm-core.c
@@ -190,10 +190,12 @@ static void core_temp_notify(enum thermal_trip_type type,
struct cpu_activity_info *cpu_node =
(struct cpu_activity_info *) data;
+ temp /= scaling_factor;
+
trace_temp_notification(cpu_node->sensor_id,
type, temp, cpu_node->temp);
- cpu_node->temp = temp / scaling_factor;
+ cpu_node->temp = temp;
complete(&sampling_completion);
}
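The reordering above scales the reading once before it is used, so the trace event and the cached cpu_node->temp now carry the same value; previously the trace logged the raw reading while the cached copy was divided by scaling_factor. Schematically (scaling_factor is assumed to be the sensor's raw-to-degree divisor):

/* before: trace sees raw units, cached value sees scaled degrees */
trace_temp_notification(cpu_node->sensor_id, type, temp, cpu_node->temp);
cpu_node->temp = temp / scaling_factor;

/* after: scale first, then use the same value everywhere */
temp /= scaling_factor;
trace_temp_notification(cpu_node->sensor_id, type, temp, cpu_node->temp);
cpu_node->temp = temp;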
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index e17c65c1e4e7..209263ccced7 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -389,6 +389,7 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
msm_trigger_wdog_bite();
#endif
+ scm_disable_sdi();
halt_spmi_pmic_arbiter();
deassert_ps_hold();
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index de6c86984e3f..87ab2b24175f 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -4,6 +4,6 @@ obj-$(CONFIG_SMB1351_USB_CHARGER) += battery.o smb1351-charger.o pmic-voter.o
obj-$(CONFIG_MSM_BCL_CTL) += msm_bcl.o
obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
obj-$(CONFIG_BATTERY_BCL) += battery_current_limit.o
-obj-$(CONFIG_QPNP_SMB2) += battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
+obj-$(CONFIG_QPNP_SMB2) += step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
obj-$(CONFIG_SMB138X_CHARGER) += battery.o smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o
obj-$(CONFIG_QPNP_QNOVO) += battery.o qpnp-qnovo.o
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e03681ec13cc..265f98288745 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1611,7 +1611,7 @@ static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
static int fg_charge_full_update(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
- int rc, msoc, bsoc, recharge_soc;
+ int rc, msoc, bsoc, recharge_soc, msoc_raw;
u8 full_soc[2] = {0xFF, 0xFF};
if (!chip->dt.hold_soc_while_full)
@@ -1647,6 +1647,7 @@ static int fg_charge_full_update(struct fg_chip *chip)
pr_err("Error in getting msoc, rc=%d\n", rc);
goto out;
}
+ msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
msoc, bsoc, chip->health, chip->charge_status,
@@ -1670,7 +1671,7 @@ static int fg_charge_full_update(struct fg_chip *chip)
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
}
- } else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+ } else if (msoc_raw < recharge_soc && chip->charge_full) {
chip->delta_soc = FULL_CAPACITY - msoc;
/*
@@ -1700,8 +1701,8 @@ static int fg_charge_full_update(struct fg_chip *chip)
rc);
goto out;
}
- fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
- bsoc >> 8, recharge_soc, chip->delta_soc);
+ fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
+ msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
} else {
goto out;
}
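The recharge check above now compares a raw monotonic SOC against recharge_soc instead of the shifted battery SOC register. A worked example of the conversion, assuming the driver's usual FULL_SOC_RAW = 255 and FULL_CAPACITY = 100 (values assumed; they are not shown in this hunk):

/* msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY) */
int msoc = 95;						/* reported SOC, percent      */
int msoc_raw = DIV_ROUND_CLOSEST(msoc * 255, 100);	/* = 242 on the 0..255 scale  */
/* with a recharge_soc of e.g. 250 (~98%), charge_full is released and charging
 * resumes once msoc_raw drops below 250 */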
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 0908eea3b186..630d02eb7a67 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -19,6 +19,7 @@
#include <linux/power_supply.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/log2.h>
#include <linux/qpnp/qpnp-revid.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -122,87 +123,6 @@ static struct smb_params v1_params = {
.max_u = 1575000,
.step_u = 25000,
},
- .step_soc_threshold[0] = {
- .name = "step charge soc threshold 1",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH1_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc_threshold[1] = {
- .name = "step charge soc threshold 2",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH2_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc_threshold[2] = {
- .name = "step charge soc threshold 3",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH3_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc_threshold[3] = {
- .name = "step charge soc threshold 4",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH4_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc = {
- .name = "step charge soc",
- .reg = STEP_CHG_SOC_VBATT_V_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- .set_proc = smblib_mapping_soc_from_field_value,
- },
- .step_cc_delta[0] = {
- .name = "step charge current delta 1",
- .reg = STEP_CHG_CURRENT_DELTA1_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[1] = {
- .name = "step charge current delta 2",
- .reg = STEP_CHG_CURRENT_DELTA2_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[2] = {
- .name = "step charge current delta 3",
- .reg = STEP_CHG_CURRENT_DELTA3_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[3] = {
- .name = "step charge current delta 4",
- .reg = STEP_CHG_CURRENT_DELTA4_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[4] = {
- .name = "step charge current delta 5",
- .reg = STEP_CHG_CURRENT_DELTA5_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
.freq_buck = {
.name = "buck switching frequency",
.reg = CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
@@ -236,7 +156,6 @@ static struct smb_params pm660_params = {
},
};
-#define STEP_CHARGING_MAX_STEPS 5
struct smb_dt_props {
int usb_icl_ua;
int dc_icl_ua;
@@ -244,14 +163,13 @@ struct smb_dt_props {
int wipower_max_uw;
int min_freq_khz;
int max_freq_khz;
- u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
- s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
struct device_node *revid_dev_node;
int float_option;
int chg_inhibit_thr_mv;
bool no_battery;
bool hvdcp_disable;
bool auto_recharge_soc;
+ int wd_bark_time;
};
struct smb2 {
@@ -273,6 +191,11 @@ module_param_named(
#define MICRO_1P5A 1500000
#define MICRO_P1A 100000
#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
+#define MIN_WD_BARK_TIME 16
+#define DEFAULT_WD_BARK_TIME 64
+#define BITE_WDOG_TIMEOUT_8S 0x3
+#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2)
+#define BARK_WDOG_TIMEOUT_SHIFT 2
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -284,27 +207,13 @@ static int smb2_parse_dt(struct smb2 *chip)
return -EINVAL;
}
- chg->step_chg_enabled = true;
-
- if (of_property_count_u32_elems(node, "qcom,step-soc-thresholds")
- != STEP_CHARGING_MAX_STEPS - 1)
- chg->step_chg_enabled = false;
-
- rc = of_property_read_u32_array(node, "qcom,step-soc-thresholds",
- chip->dt.step_soc_threshold,
- STEP_CHARGING_MAX_STEPS - 1);
- if (rc < 0)
- chg->step_chg_enabled = false;
+ chg->step_chg_enabled = of_property_read_bool(node,
+ "qcom,step-charging-enable");
- if (of_property_count_u32_elems(node, "qcom,step-current-deltas")
- != STEP_CHARGING_MAX_STEPS)
- chg->step_chg_enabled = false;
-
- rc = of_property_read_u32_array(node, "qcom,step-current-deltas",
- chip->dt.step_cc_delta,
- STEP_CHARGING_MAX_STEPS);
- if (rc < 0)
- chg->step_chg_enabled = false;
+ rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
+ &chip->dt.wd_bark_time);
+ if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
+ chip->dt.wd_bark_time = DEFAULT_WD_BARK_TIME;
chip->dt.no_battery = of_property_read_bool(node,
"qcom,batteryless-platform");
@@ -989,7 +898,6 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
- POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
POWER_SUPPLY_PROP_CHARGE_DONE,
POWER_SUPPLY_PROP_PARALLEL_DISABLE,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
@@ -1047,9 +955,6 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
val->intval = chg->step_chg_enabled;
break;
- case POWER_SUPPLY_PROP_STEP_CHARGING_STEP:
- rc = smblib_get_prop_step_chg_step(chg, val);
- break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = smblib_get_prop_batt_voltage_now(chg, val);
break;
@@ -1163,6 +1068,9 @@ static int smb2_batt_set_prop(struct power_supply *psy,
vote(chg->fcc_votable, BATT_PROFILE_VOTER, false, 0);
}
break;
+ case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+ chg->step_chg_enabled = !!val->intval;
+ break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
chg->batt_profile_fcc_ua = val->intval;
vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
@@ -1203,6 +1111,7 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_DP_DM:
case POWER_SUPPLY_PROP_RERUN_AICL:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
return 1;
default:
break;
@@ -1330,73 +1239,6 @@ static int smb2_init_vconn_regulator(struct smb2 *chip)
/***************************
* HARDWARE INITIALIZATION *
***************************/
-static int smb2_config_step_charging(struct smb2 *chip)
-{
- struct smb_charger *chg = &chip->chg;
- int rc = 0;
- int i;
-
- if (!chg->step_chg_enabled)
- return rc;
-
- for (i = 0; i < STEP_CHARGING_MAX_STEPS - 1; i++) {
- rc = smblib_set_charge_param(chg,
- &chg->param.step_soc_threshold[i],
- chip->dt.step_soc_threshold[i]);
- if (rc < 0) {
- pr_err("Couldn't configure soc thresholds rc = %d\n",
- rc);
- goto err_out;
- }
- }
-
- for (i = 0; i < STEP_CHARGING_MAX_STEPS; i++) {
- rc = smblib_set_charge_param(chg, &chg->param.step_cc_delta[i],
- chip->dt.step_cc_delta[i]);
- if (rc < 0) {
- pr_err("Couldn't configure cc delta rc = %d\n",
- rc);
- goto err_out;
- }
- }
-
- rc = smblib_write(chg, STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG,
- STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S);
- if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure soc request timeout reg rc=%d\n",
- rc);
- goto err_out;
- }
-
- rc = smblib_write(chg, STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG,
- STEP_CHG_UPDATE_FAIL_TIMEOUT_120S);
- if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure soc fail timeout reg rc=%d\n",
- rc);
- goto err_out;
- }
-
- /*
- * enable step charging, source soc, standard mode, go to final
- * state in case of failure.
- */
- rc = smblib_write(chg, CHGR_STEP_CHG_MODE_CFG_REG,
- STEP_CHARGING_ENABLE_BIT |
- STEP_CHARGING_SOURCE_SELECT_BIT |
- STEP_CHARGING_SOC_FAIL_OPTION_BIT);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
- goto err_out;
- }
-
- return 0;
-err_out:
- chg->step_chg_enabled = false;
- return rc;
-}
-
static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
{
int rc;
@@ -1572,7 +1414,7 @@ static int smb2_init_hw(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
int rc;
- u8 stat;
+ u8 stat, val;
if (chip->dt.no_battery)
chg->fake_capacity = 50;
@@ -1720,11 +1562,27 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
- /* configure step charging */
- rc = smb2_config_step_charging(chip);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure step charging rc=%d\n",
- rc);
+ val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT) &
+ BARK_WDOG_TIMEOUT_MASK;
+ val |= BITE_WDOG_TIMEOUT_8S;
+ rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+ BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
+ BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
+ val);
+ if (rc) {
+ pr_err("Couldn't configue WD config rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable the WD bark interrupt and start the watchdog on plugin */
+ rc = smblib_masked_write(chg, WD_CFG_REG,
+ WATCHDOG_TRIGGER_AFP_EN_BIT |
+ WDOG_TIMER_EN_ON_PLUGIN_BIT |
+ BARK_WDOG_INT_EN_BIT,
+ WDOG_TIMER_EN_ON_PLUGIN_BIT |
+ BARK_WDOG_INT_EN_BIT);
+ if (rc) {
+ pr_err("Couldn't configue WD config rc=%d\n", rc);
return rc;
}
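For reference, the watchdog configuration above derives the 2-bit bark field from the requested bark time with ilog2(). Assuming the hardware accepts 16/32/64/128 s (inferred from MIN_WD_BARK_TIME and the 2-bit mask, not from a datasheet), the mapping works out as:

/* wd_bark_time : ilog2(t / 16) : value placed in bits 3:2 (BARK_WDOG_TIMEOUT) */
/*     16 s     :       0       :  0b00                                        */
/*     32 s     :       1       :  0b01                                        */
/*     64 s     :       2       :  0b10   <- DEFAULT_WD_BARK_TIME              */
/*    128 s     :       3       :  0b11                                        */
val = (ilog2(64 / 16) << BARK_WDOG_TIMEOUT_SHIFT) & BARK_WDOG_TIMEOUT_MASK;
val |= BITE_WDOG_TIMEOUT_8S;	/* bits 1:0 = 0x3, i.e. an 8 s bite timeout */
/* -> val == 0x08 | 0x03 == 0x0b for the default 64 s bark / 8 s bite */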
@@ -1783,6 +1641,13 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
switch (chip->dt.chg_inhibit_thr_mv) {
case 50:
rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
@@ -1847,6 +1712,12 @@ static int smb2_post_init(struct smb2 *chip)
struct smb_charger *chg = &chip->chg;
int rc;
+ /*
+ * In case the USB path is suspended, we would have missed disabling
+ * the ICL change interrupt because the interrupt might not have
+ * been requested yet.
+ */
+ rerun_election(chg->usb_icl_votable);
+
/* configure power role for dual-role */
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_POWER_ROLE_CMD_MASK, 0);
@@ -1938,8 +1809,8 @@ static int smb2_determine_initial_status(struct smb2 *chip)
smblib_handle_usb_source_change(0, &irq_data);
smblib_handle_chg_state_change(0, &irq_data);
smblib_handle_icl_change(0, &irq_data);
- smblib_handle_step_chg_state_change(0, &irq_data);
- smblib_handle_step_chg_soc_update_request(0, &irq_data);
+ smblib_handle_batt_temp_changed(0, &irq_data);
+ smblib_handle_wdog_bark(0, &irq_data);
return 0;
}
@@ -1961,18 +1832,15 @@ static struct smb_irq_info smb2_irqs[] = {
},
[STEP_CHG_STATE_CHANGE_IRQ] = {
.name = "step-chg-state-change",
- .handler = smblib_handle_step_chg_state_change,
- .wake = true,
+ .handler = NULL,
},
[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
.name = "step-chg-soc-update-fail",
- .handler = smblib_handle_step_chg_soc_update_fail,
- .wake = true,
+ .handler = NULL,
},
[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
.name = "step-chg-soc-update-request",
- .handler = smblib_handle_step_chg_soc_update_request,
- .wake = true,
+ .handler = NULL,
},
/* OTG IRQs */
[OTG_FAIL_IRQ] = {
@@ -1995,6 +1863,7 @@ static struct smb_irq_info smb2_irqs[] = {
[BATT_TEMP_IRQ] = {
.name = "bat-temp",
.handler = smblib_handle_batt_temp_changed,
+ .wake = true,
},
[BATT_OCP_IRQ] = {
.name = "bat-ocp",
@@ -2090,7 +1959,8 @@ static struct smb_irq_info smb2_irqs[] = {
},
[WDOG_BARK_IRQ] = {
.name = "wdog-bark",
- .handler = NULL,
+ .handler = smblib_handle_wdog_bark,
+ .wake = true,
},
[AICL_FAIL_IRQ] = {
.name = "aicl-fail",
@@ -2196,6 +2066,8 @@ static int smb2_request_interrupts(struct smb2 *chip)
return rc;
}
}
+ if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq)
+ chg->usb_icl_change_irq_enabled = true;
return rc;
}
@@ -2328,15 +2200,15 @@ static int smb2_probe(struct platform_device *pdev)
return rc;
}
- rc = smblib_init(chg);
+ rc = smb2_parse_dt(chip);
if (rc < 0) {
- pr_err("Smblib_init failed rc=%d\n", rc);
+ pr_err("Couldn't parse device tree rc=%d\n", rc);
goto cleanup;
}
- rc = smb2_parse_dt(chip);
+ rc = smblib_init(chg);
if (rc < 0) {
- pr_err("Couldn't parse device tree rc=%d\n", rc);
+ pr_err("Smblib_init failed rc=%d\n", rc);
goto cleanup;
}
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 16db425514c3..0ed07aa13855 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -23,6 +23,7 @@
#include "smb-lib.h"
#include "smb-reg.h"
#include "battery.h"
+#include "step-chg-jeita.h"
#include "storm-watch.h"
#define smblib_err(chg, fmt, ...) \
@@ -102,35 +103,6 @@ unlock:
return rc;
}
-static int smblib_get_step_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
-{
- int rc, step_state;
- u8 stat;
-
- if (!chg->step_chg_enabled) {
- *cc_delta_ua = 0;
- return 0;
- }
-
- rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
- rc);
- return rc;
- }
-
- step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
- STEP_CHARGING_STATUS_SHIFT;
- rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
- cc_delta_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
- return rc;
- }
-
- return 0;
-}
-
static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
{
int rc, cc_minus_ua;
@@ -149,7 +121,7 @@ static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
}
rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
- &cc_minus_ua);
+ &cc_minus_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
return rc;
@@ -402,38 +374,31 @@ int smblib_set_charge_param(struct smb_charger *chg,
return rc;
}
-static int step_charge_soc_update(struct smb_charger *chg, int capacity)
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
{
int rc = 0;
+ int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq;
- rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
- if (rc < 0) {
- smblib_err(chg, "Error in updating soc, rc=%d\n", rc);
- return rc;
- }
-
- rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
- STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
- rc);
- return rc;
+ if (suspend && irq) {
+ if (chg->usb_icl_change_irq_enabled) {
+ disable_irq_nosync(irq);
+ chg->usb_icl_change_irq_enabled = false;
+ }
}
- return rc;
-}
-
-int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
-{
- int rc = 0;
-
rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
suspend ? USBIN_SUSPEND_BIT : 0);
if (rc < 0)
smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
suspend ? "suspend" : "resume", rc);
+ if (!suspend && irq) {
+ if (!chg->usb_icl_change_irq_enabled) {
+ enable_irq(irq);
+ chg->usb_icl_change_irq_enabled = true;
+ }
+ }
+
return rc;
}
@@ -523,6 +488,45 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
/********************
* HELPER FUNCTIONS *
********************/
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+ int rc = 0;
+
+ /* fetch the DPDM regulator */
+ if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+ "dpdm-supply", NULL)) {
+ chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+ if (IS_ERR(chg->dpdm_reg)) {
+ rc = PTR_ERR(chg->dpdm_reg);
+ smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+ rc);
+ chg->dpdm_reg = NULL;
+ return rc;
+ }
+ }
+
+ if (enable) {
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+ rc = regulator_enable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable dpdm regulator rc=%d\n",
+ rc);
+ }
+ } else {
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+ rc = regulator_disable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't disable dpdm regulator rc=%d\n",
+ rc);
+ }
+ }
+
+ return rc;
+}
static void smblib_rerun_apsd(struct smb_charger *chg)
{
@@ -549,10 +553,17 @@ static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* if PD is active, APSD is disabled so won't have a valid result */
- if (chg->pd_active)
+ if (chg->pd_active) {
chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
- else
+ } else {
+ /*
+ * Update real charger type only if it's not a FLOAT
+ * charger detected as SDP
+ */
+ if (!(apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
chg->real_charger_type = apsd_result->pst;
+ }
smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
apsd_result->name, chg->pd_active);
@@ -635,13 +646,9 @@ static void smblib_uusb_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->pl_enable_work);
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to disable DPDM rc=%d\n", rc);
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -733,24 +740,9 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg)
if (!val.intval)
return 0;
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
chg->uusb_apsd_rerun_done = true;
smblib_rerun_apsd(chg);
@@ -820,6 +812,7 @@ static int set_sdp_current(struct smb_charger *chg, int icl_ua)
{
int rc;
u8 icl_options;
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* power source is SDP */
switch (icl_ua) {
@@ -844,6 +837,21 @@ static int set_sdp_current(struct smb_charger *chg, int icl_ua)
return -EINVAL;
}
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+ apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ /*
+ * change the float charger configuration to SDP, if this
+ * is the case of SDP being detected as FLOAT
+ */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FORCE_FLOAT_SDP_CFG_BIT, FORCE_FLOAT_SDP_CFG_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set float ICL options rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
if (rc < 0) {
@@ -885,7 +893,6 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
if (icl_ua < USBIN_25MA)
return smblib_set_usb_suspend(chg, true);
- disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
if (icl_ua == INT_MAX)
goto override_suspend_config;
@@ -943,7 +950,6 @@ override_suspend_config:
}
enable_icl_changed_interrupt:
- enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
return rc;
}
@@ -1319,68 +1325,110 @@ int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
#define MAX_RETRY 15
#define MIN_DELAY_US 2000
#define MAX_DELAY_US 9000
-static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+static int otg_current[] = {250000, 500000, 1000000, 1500000};
+static int smblib_enable_otg_wa(struct smb_charger *chg)
{
- struct smb_charger *chg = rdev_get_drvdata(rdev);
- int rc, retry_count = 0, min_delay = MIN_DELAY_US;
u8 stat;
+ int rc, i, retry_count = 0, min_delay = MIN_DELAY_US;
- smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
- rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
- ENG_BUCKBOOST_HALT1_8_MODE_BIT,
- ENG_BUCKBOOST_HALT1_8_MODE_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
- rc);
- return rc;
- }
+ for (i = 0; i < ARRAY_SIZE(otg_current); i++) {
+ smblib_dbg(chg, PR_OTG, "enabling OTG with %duA\n",
+ otg_current[i]);
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+ otg_current[i]);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set otg limit rc=%d\n", rc);
+ return rc;
+ }
- smblib_dbg(chg, PR_OTG, "enabling OTG\n");
- rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
- return rc;
- }
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ return rc;
+ }
- if (chg->wa_flags & OTG_WA) {
- /* check for softstart */
+ retry_count = 0;
+ min_delay = MIN_DELAY_US;
do {
usleep_range(min_delay, min_delay + 100);
rc = smblib_read(chg, OTG_STATUS_REG, &stat);
if (rc < 0) {
- smblib_err(chg,
- "Couldn't read OTG status rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+ rc);
goto out;
}
if (stat & BOOST_SOFTSTART_DONE_BIT) {
rc = smblib_set_charge_param(chg,
&chg->param.otg_cl, chg->otg_cl_ua);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't set otg limit\n");
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set otg limit rc=%d\n",
+ rc);
+ goto out;
+ }
break;
}
-
/* increase the delay for following iterations */
if (retry_count > 5)
min_delay = MAX_DELAY_US;
+
} while (retry_count++ < MAX_RETRY);
if (retry_count >= MAX_RETRY) {
- smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
- goto out;
+ smblib_dbg(chg, PR_OTG, "OTG enable failed with %duA\n",
+ otg_current[i]);
+ rc = smblib_write(chg, CMD_OTG_REG, 0);
+ if (rc < 0) {
+ smblib_err(chg, "disable OTG rc=%d\n", rc);
+ goto out;
+ }
+ } else {
+ smblib_dbg(chg, PR_OTG, "OTG enabled\n");
+ return 0;
}
}
+ if (i == ARRAY_SIZE(otg_current)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
return 0;
out:
- /* disable OTG if softstart failed */
smblib_write(chg, CMD_OTG_REG, 0);
return rc;
}
+static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc;
+
+ smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ smblib_dbg(chg, PR_OTG, "enabling OTG\n");
+
+ if (chg->wa_flags & OTG_WA) {
+ rc = smblib_enable_otg_wa(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ } else {
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
@@ -1749,30 +1797,6 @@ int smblib_get_prop_batt_temp(struct smb_charger *chg,
return rc;
}
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
- union power_supply_propval *val)
-{
- int rc;
- u8 stat;
-
- if (!chg->step_chg_enabled) {
- val->intval = -1;
- return 0;
- }
-
- rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
- rc);
- return rc;
- }
-
- val->intval = (stat & STEP_CHARGING_STATUS_MASK) >>
- STEP_CHARGING_STATUS_SHIFT;
-
- return rc;
-}
-
int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
union power_supply_propval *val)
{
@@ -2421,6 +2445,31 @@ int smblib_get_prop_die_health(struct smb_charger *chg,
return 0;
}
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define HVDCP_CURRENT_UA 3000000
+#define TYPEC_DEFAULT_CURRENT_UA 900000
+#define TYPEC_MEDIUM_CURRENT_UA 1500000
+#define TYPEC_HIGH_CURRENT_UA 3000000
+static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
+{
+ int rp_ua;
+
+ switch (typec_mode) {
+ case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+ rp_ua = TYPEC_HIGH_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+ case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+ /* fall through */
+ default:
+ rp_ua = DCP_CURRENT_UA;
+ }
+
+ return rp_ua;
+}
+
/*******************
* USB PSY SETTERS *
* *****************/
@@ -2438,14 +2487,54 @@ int smblib_set_prop_pd_current_max(struct smb_charger *chg,
return rc;
}
+static int smblib_handle_usb_current(struct smb_charger *chg,
+ int usb_current)
+{
+ int rc = 0, rp_ua, typec_mode;
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ if (usb_current == -ETIMEDOUT) {
+ /*
+ * Valid FLOAT charger, report the current based
+ * on Rp
+ */
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+ rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+ true, rp_ua);
+ if (rc < 0)
+ return rc;
+ } else {
+ /*
+ * FLOAT charger detected as SDP by USB driver,
+ * charge with the requested current and update the
+ * real_charger_type
+ */
+ chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ if (rc < 0)
+ return rc;
+ rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+ false, 0);
+ if (rc < 0)
+ return rc;
+ }
+ } else {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ }
+
+ return rc;
+}
+
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc = 0;
if (!chg->pd_active) {
- rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
- true, val->intval);
+ rc = smblib_handle_usb_current(chg, val->intval);
} else if (chg->system_suspend_supported) {
if (val->intval <= USBIN_25MA)
rc = vote(chg->usb_icl_votable,
@@ -2828,46 +2917,72 @@ int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
return rc;
}
+static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+{
+ u8 stat_1, stat_2;
+ int rc;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat_1);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat_2);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if ((chg->jeita_status && !(stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
+ ((stat_1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
+ /*
+ * We are moving from JEITA soft -> Normal and charging
+ * is terminated
+ */
+ rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't disable charging rc=%d\n",
+ rc);
+ return rc;
+ }
+ rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
+ CHARGING_ENABLE_CMD_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable charging rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ chg->jeita_status = stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+
+ return 0;
+}
+
/***********************
* USB MAIN PSY GETTERS *
*************************/
int smblib_get_prop_fcc_delta(struct smb_charger *chg,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
- int rc, jeita_cc_delta_ua, step_cc_delta_ua, hw_cc_delta_ua = 0;
-
- rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
- step_cc_delta_ua = 0;
- } else {
- hw_cc_delta_ua = step_cc_delta_ua;
- }
+ int rc, jeita_cc_delta_ua = 0;
rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
jeita_cc_delta_ua = 0;
- } else if (jeita_cc_delta_ua < 0) {
- /* HW will take the min between JEITA and step charge */
- hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
}
- val->intval = hw_cc_delta_ua;
+ val->intval = jeita_cc_delta_ua;
return 0;
}
/***********************
* USB MAIN PSY SETTERS *
*************************/
-
-#define SDP_CURRENT_UA 500000
-#define CDP_CURRENT_UA 1500000
-#define DCP_CURRENT_UA 1500000
-#define HVDCP_CURRENT_UA 3000000
-#define TYPEC_DEFAULT_CURRENT_UA 900000
-#define TYPEC_MEDIUM_CURRENT_UA 1500000
-#define TYPEC_HIGH_CURRENT_UA 3000000
int smblib_get_charge_current(struct smb_charger *chg,
int *total_current_ua)
{
@@ -3034,62 +3149,19 @@ irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
return IRQ_HANDLED;
}
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
- if (chg->step_chg_enabled)
- rerun_election(chg->fcc_votable);
-
- return IRQ_HANDLED;
-}
-
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
- if (chg->step_chg_enabled)
- rerun_election(chg->fcc_votable);
-
- return IRQ_HANDLED;
-}
-
-#define STEP_SOC_REQ_MS 3000
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
+irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
int rc;
- union power_supply_propval pval = {0, };
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
- if (!chg->bms_psy) {
- schedule_delayed_work(&chg->step_soc_req_work,
- msecs_to_jiffies(STEP_SOC_REQ_MS));
+ rc = smblib_recover_from_soft_jeita(chg);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+ rc);
return IRQ_HANDLED;
}
- rc = smblib_get_prop_batt_capacity(chg, &pval);
- if (rc < 0)
- smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
- else
- step_charge_soc_update(chg, pval.intval);
-
- return IRQ_HANDLED;
-}
-
-irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
-
rerun_election(chg->fcc_votable);
power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
@@ -3201,25 +3273,10 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
chg->chg_freq.freq_removal);
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
if (vbus_rising) {
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
/* Schedule work to enable parallel charger */
vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
@@ -3239,13 +3296,9 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
}
}
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
}
if (chg->micro_usb_mode)
@@ -3466,24 +3519,6 @@ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
rising ? "rising" : "falling");
}
-static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
-{
- int rp_ua;
-
- switch (typec_mode) {
- case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
- rp_ua = TYPEC_HIGH_CURRENT_UA;
- break;
- case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
- case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
- /* fall through */
- default:
- rp_ua = DCP_CURRENT_UA;
- }
-
- return rp_ua;
-}
-
static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
{
int typec_mode;
@@ -3509,11 +3544,17 @@ static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
break;
case POWER_SUPPLY_TYPE_USB_DCP:
- case POWER_SUPPLY_TYPE_USB_FLOAT:
typec_mode = smblib_get_prop_typec_mode(chg);
rp_ua = get_rp_based_dcp_current(chg, typec_mode);
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
break;
+ case POWER_SUPPLY_TYPE_USB_FLOAT:
+ /*
+ * limit ICL to 100 mA; the USB driver will enumerate to check
+ * if this is an SDP and set the current appropriately
+ */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+ break;
case POWER_SUPPLY_TYPE_USB_HVDCP:
case POWER_SUPPLY_TYPE_USB_HVDCP_3:
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
@@ -3645,13 +3686,9 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->cc2_detach_wa_active = false;
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -3708,6 +3745,13 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->pd_hard_reset = 0;
chg->typec_legacy_valid = false;
+ /* write back the default FLOAT charger configuration */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ (u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+ rc);
+
/* reset back to 120mS tCC debounce */
rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
if (rc < 0)
@@ -3787,10 +3831,14 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg)
smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
rc);
- if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) {
typec_sink_insertion(chg);
- else
+ } else {
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
typec_sink_removal(chg);
+ }
}
static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
@@ -3803,6 +3851,24 @@ static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
return;
/*
+ * if APSD indicates FLOAT and the USB stack has detected SDP,
+ * do not respond to Rp changes as we cannot confirm that it's
+ * a legacy cable
+ */
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+ return;
+ /*
+ * We want the ICL vote @ 100mA for a FLOAT charger
+ * until the detection by the USB stack is complete.
+ * Ignore the Rp changes unless there is a
+ * pre-existing valid vote.
+ */
+ if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ get_client_vote(chg->usb_icl_votable,
+ LEGACY_UNKNOWN_VOTER) <= 100000)
+ return;
+
+ /*
* handle Rp change for DCP/FLOAT/OCP.
* Update the current only if the Rp is different from
* the last Rp value.
@@ -3987,10 +4053,15 @@ irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
int rc;
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
if (rc < 0)
smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
+ if (chg->step_chg_enabled)
+ power_supply_changed(chg->batt_psy);
+
return IRQ_HANDLED;
}
@@ -4073,22 +4144,6 @@ static void bms_update_work(struct work_struct *work)
power_supply_changed(chg->batt_psy);
}
-static void step_soc_req_work(struct work_struct *work)
-{
- struct smb_charger *chg = container_of(work, struct smb_charger,
- step_soc_req_work.work);
- union power_supply_propval pval = {0, };
- int rc;
-
- rc = smblib_get_prop_batt_capacity(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
- return;
- }
-
- step_charge_soc_update(chg, pval.intval);
-}
-
static void clear_hdc_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -4621,7 +4676,6 @@ int smblib_init(struct smb_charger *chg)
INIT_WORK(&chg->bms_update_work, bms_update_work);
INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
- INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
@@ -4643,6 +4697,13 @@ int smblib_init(struct smb_charger *chg)
return rc;
}
+ rc = qcom_step_chg_init(chg->step_chg_enabled);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
+ rc);
+ return rc;
+ }
+
rc = smblib_create_votables(chg);
if (rc < 0) {
smblib_err(chg, "Couldn't create votables rc=%d\n",
@@ -4677,7 +4738,6 @@ int smblib_deinit(struct smb_charger *chg)
cancel_work_sync(&chg->bms_update_work);
cancel_work_sync(&chg->rdstd_cc2_detach_work);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
- cancel_delayed_work_sync(&chg->step_soc_req_work);
cancel_delayed_work_sync(&chg->clear_hdc_work);
cancel_work_sync(&chg->otg_oc_work);
cancel_work_sync(&chg->vconn_oc_work);
@@ -4689,6 +4749,7 @@ int smblib_deinit(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->bb_removal_work);
power_supply_unreg_notifier(&chg->nb);
smblib_destroy_votables(chg);
+ qcom_step_chg_deinit();
qcom_batt_deinit();
break;
case PARALLEL_SLAVE:
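To tie the FLOAT-charger pieces of this file together: APSD reporting FLOAT first pins the input current at 100 mA via LEGACY_UNKNOWN_VOTER, and the USB stack's verdict later either re-votes an Rp-based DCP current (-ETIMEDOUT) or switches to the enumerated SDP current. A condensed sketch of that sequence, using only the voter names and currents that appear in the hunks above (a reading aid, not code from the patch):

/* 1. APSD detects FLOAT -> hold the input at 100 mA until USB enumeration settles */
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);

/* 2a. USB stack gives up (-ETIMEDOUT): treat it as a DCP sized by the Type-C Rp */
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true,
     get_rp_based_dcp_current(chg, smblib_get_prop_typec_mode(chg)));

/* 2b. ...or the USB stack enumerates it as SDP: take the requested current and
 *     drop the legacy 100 mA vote so USB_PSY_VOTER wins */
vote(chg->usb_icl_votable, USB_PSY_VOTER, true, usb_current);
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);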
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index c97b84c34084..8c810352f78f 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -190,9 +190,6 @@ struct smb_params {
struct smb_chg_param dc_icl_div2_mid_hv;
struct smb_chg_param dc_icl_div2_hv;
struct smb_chg_param jeita_cc_comp;
- struct smb_chg_param step_soc_threshold[4];
- struct smb_chg_param step_soc;
- struct smb_chg_param step_cc_delta[5];
struct smb_chg_param freq_buck;
struct smb_chg_param freq_boost;
};
@@ -287,7 +284,6 @@ struct smb_charger {
struct work_struct rdstd_cc2_detach_work;
struct delayed_work hvdcp_detect_work;
struct delayed_work ps_change_timeout_work;
- struct delayed_work step_soc_req_work;
struct delayed_work clear_hdc_work;
struct work_struct otg_oc_work;
struct work_struct vconn_oc_work;
@@ -328,6 +324,9 @@ struct smb_charger {
int fake_input_current_limited;
bool pr_swap_in_progress;
int typec_mode;
+ int usb_icl_change_irq_enabled;
+ u32 jeita_status;
+ u8 float_cfg;
/* workaround flag */
u32 wa_flags;
@@ -382,9 +381,6 @@ int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev);
irqreturn_t smblib_handle_debug(int irq, void *data);
irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data);
irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data);
irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
@@ -422,9 +418,6 @@ int smblib_get_prop_batt_current_now(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_batt_temp(struct smb_charger *chg,
union power_supply_propval *val);
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
- union power_supply_propval *val);
-
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -504,7 +497,7 @@ int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
void smblib_suspend_on_debug_battery(struct smb_charger *chg);
int smblib_rerun_apsd_if_required(struct smb_charger *chg);
int smblib_get_prop_fcc_delta(struct smb_charger *chg,
- union power_supply_propval *val);
+ union power_supply_propval *val);
int smblib_icl_override(struct smb_charger *chg, bool override);
int smblib_dp_dm(struct smb_charger *chg, int val);
int smblib_rerun_aicl(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 335b160e24a5..ec74e3825dd5 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -845,6 +845,13 @@ static int smb138x_init_slave_hw(struct smb138x *chip)
}
}
+ /* configure a fixed 700 kHz switching frequency to avoid tdie errors */
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+ if (rc < 0) {
+ pr_err("Couldn't configure 700Khz switch freq rc=%d\n", rc);
+ return rc;
+ }
+
/* enable watchdog bark and bite interrupts, and disable the watchdog */
rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
new file mode 100644
index 000000000000..a2c08be960be
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -0,0 +1,272 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+#include "step-chg-jeita.h"
+
+#define MAX_STEP_CHG_ENTRIES 8
+#define STEP_CHG_VOTER "STEP_CHG_VOTER"
+#define STATUS_CHANGE_VOTER "STATUS_CHANGE_VOTER"
+
+#define is_between(left, right, value) \
+ (((left) >= (right) && (left) >= (value) \
+ && (value) >= (right)) \
+ || ((left) <= (right) && (left) <= (value) \
+ && (value) <= (right)))
+
+struct step_chg_data {
+ u32 vbatt_soc_low;
+ u32 vbatt_soc_high;
+ u32 fcc_ua;
+};
+
+struct step_chg_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ struct step_chg_data cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct step_chg_info {
+ ktime_t last_update_time;
+ bool step_chg_enable;
+
+ struct votable *fcc_votable;
+ struct wakeup_source *step_chg_ws;
+ struct power_supply *batt_psy;
+ struct delayed_work status_change_work;
+ struct notifier_block nb;
+};
+
+static struct step_chg_info *the_chip;
+
+/*
+ * Step Charging Configuration
+ * Update the table based on the battery profile
+ * Supports VBATT and SOC based source
+ */
+static struct step_chg_cfg step_chg_config = {
+ .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ .prop_name = "VBATT",
+ .cfg = {
+ /* VBAT_LOW VBAT_HIGH FCC */
+ {3600000, 4000000, 3000000},
+ {4000000, 4200000, 2800000},
+ {4200000, 4400000, 2000000},
+ },
+/*
+ * SOC STEP-CHG configuration example.
+ *
+ * .psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+ * .prop_name = "SOC",
+ * .cfg = {
+ * //SOC_LOW SOC_HIGH FCC
+ * {20, 70, 3000000},
+ * {70, 90, 2750000},
+ * {90, 100, 2500000},
+ * },
+ */
+};
+
+static bool is_batt_available(struct step_chg_info *chip)
+{
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
+static int get_fcc(int threshold)
+{
+ int i;
+
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+ if (is_between(step_chg_config.cfg[i].vbatt_soc_low,
+ step_chg_config.cfg[i].vbatt_soc_high, threshold))
+ return step_chg_config.cfg[i].fcc_ua;
+
+ return -ENODATA;
+}
+
+static int handle_step_chg_config(struct step_chg_info *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0, fcc_ua = 0;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
+ if (rc < 0)
+ chip->step_chg_enable = 0;
+ else
+ chip->step_chg_enable = pval.intval;
+
+ if (!chip->step_chg_enable) {
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ return 0;
+ }
+
+ rc = power_supply_get_property(chip->batt_psy,
+ step_chg_config.psy_prop, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't read %s property rc=%d\n",
+ step_chg_config.prop_name, rc);
+ return rc;
+ }
+
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ return -EINVAL;
+
+ fcc_ua = get_fcc(pval.intval);
+ if (fcc_ua < 0) {
+ /* remove the vote if no step-based fcc is found */
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ return 0;
+ }
+
+ vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
+
+ pr_debug("%s = %d Step-FCC = %duA\n",
+ step_chg_config.prop_name, pval.intval, fcc_ua);
+
+ return 0;
+}
+
+#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
+static void status_change_work(struct work_struct *work)
+{
+ struct step_chg_info *chip = container_of(work,
+ struct step_chg_info, status_change_work.work);
+ int rc = 0;
+ u64 elapsed_us;
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto release_ws;
+
+ if (!is_batt_available(chip))
+ goto release_ws;
+
+ rc = handle_step_chg_config(chip);
+ if (rc < 0)
+ goto release_ws;
+
+ chip->last_update_time = ktime_get();
+
+release_ws:
+ __pm_relax(chip->step_chg_ws);
+}
+
+static int step_chg_notifier_call(struct notifier_block *nb,
+ unsigned long ev, void *v)
+{
+ struct power_supply *psy = v;
+ struct step_chg_info *chip = container_of(nb, struct step_chg_info, nb);
+
+ if (ev != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if ((strcmp(psy->desc->name, "battery") == 0)) {
+ __pm_stay_awake(chip->step_chg_ws);
+ schedule_delayed_work(&chip->status_change_work, 0);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int step_chg_register_notifier(struct step_chg_info *chip)
+{
+ int rc;
+
+ chip->nb.notifier_call = step_chg_notifier_call;
+ rc = power_supply_reg_notifier(&chip->nb);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int qcom_step_chg_init(bool step_chg_enable)
+{
+ int rc;
+ struct step_chg_info *chip;
+
+ if (the_chip) {
+ pr_err("Already initialized\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
+ if (!chip->step_chg_ws) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ chip->step_chg_enable = step_chg_enable;
+
+ if (step_chg_enable && (!step_chg_config.psy_prop ||
+ !step_chg_config.prop_name)) {
+ /* fail if step-chg configuration is invalid */
+ pr_err("Step-chg configuration not defined - fail\n");
+ rc = -ENODATA;
+ goto release_wakeup_source;
+ }
+
+ INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+
+ rc = step_chg_register_notifier(chip);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ goto release_wakeup_source;
+ }
+
+ the_chip = chip;
+
+ if (step_chg_enable)
+ pr_info("Step charging enabled. Using %s source\n",
+ step_chg_config.prop_name);
+
+ return 0;
+
+release_wakeup_source:
+ wakeup_source_unregister(chip->step_chg_ws);
+cleanup:
+ kfree(chip);
+ return rc;
+}
+
+void qcom_step_chg_deinit(void)
+{
+ struct step_chg_info *chip = the_chip;
+
+ if (!chip)
+ return;
+
+ cancel_delayed_work_sync(&chip->status_change_work);
+ power_supply_unreg_notifier(&chip->nb);
+ wakeup_source_unregister(chip->step_chg_ws);
+ the_chip = NULL;
+ kfree(chip);
+}
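For the default VBATT table in step_chg_config above, get_fcc() walks the rows with is_between(); a worked lookup using the values taken directly from that table:

/* VBATT = 4.10 V -> matches {4000000, 4200000, 2800000} -> STEP_CHG_VOTER votes 2.8 A */
int fcc_ua = get_fcc(4100000);		/* returns 2800000 */

/* VBATT = 4.45 V -> matches no row -> get_fcc() returns -ENODATA and
 * handle_step_chg_config() removes the STEP_CHG_VOTER vote, leaving the
 * other FCC voters in control */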
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
new file mode 100644
index 000000000000..928eeb7a670b
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STEP_CHG_H__
+#define __STEP_CHG_H__
+int qcom_step_chg_init(bool);
+void qcom_step_chg_deinit(void);
+#endif /* __STEP_CHG_H__ */
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index d57bf2f3b80c..e46b1d583f40 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -317,6 +317,7 @@ struct _qpnp_pwm_config {
struct pwm_period_config period;
int supported_sizes;
int force_pwm_size;
+ bool update_period;
};
/* Public facing structure */
@@ -1211,28 +1212,32 @@ static int _pwm_config(struct qpnp_pwm_chip *chip,
rc = qpnp_lpg_save_pwm_value(chip);
if (rc)
goto out;
- rc = qpnp_lpg_configure_pwm(chip);
- if (rc)
- goto out;
- rc = qpnp_configure_pwm_control(chip);
- if (rc)
- goto out;
- if (!rc && chip->enabled) {
- rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE);
- if (rc) {
- pr_err("Error in configuring pwm state, rc=%d\n", rc);
- return rc;
- }
+ if (pwm_config->update_period) {
+ rc = qpnp_lpg_configure_pwm(chip);
+ if (rc)
+ goto out;
+ rc = qpnp_configure_pwm_control(chip);
+ if (rc)
+ goto out;
+ if (!rc && chip->enabled) {
+ rc = qpnp_lpg_configure_pwm_state(chip,
+ QPNP_PWM_ENABLE);
+ if (rc) {
+ pr_err("Error in configuring pwm state, rc=%d\n",
+ rc);
+ return rc;
+ }
- /* Enable the glitch removal after PWM is enabled */
- rc = qpnp_lpg_glitch_removal(chip, true);
- if (rc) {
- pr_err("Error in enabling glitch control, rc=%d\n", rc);
- return rc;
+ /* Enable the glitch removal after PWM is enabled */
+ rc = qpnp_lpg_glitch_removal(chip, true);
+ if (rc) {
+ pr_err("Error in enabling glitch control, rc=%d\n",
+ rc);
+ return rc;
+ }
}
}
-
pr_debug("duty/period=%u/%u %s: pwm_value=%d (of %d)\n",
(unsigned int)duty_value, (unsigned int)period_value,
(tm_lvl == LVL_USEC) ? "usec" : "nsec",
@@ -1375,12 +1380,14 @@ static int qpnp_pwm_config(struct pwm_chip *pwm_chip,
spin_lock_irqsave(&chip->lpg_lock, flags);
+ chip->pwm_config.update_period = false;
if (prev_period_us > INT_MAX / NSEC_PER_USEC ||
prev_period_us * NSEC_PER_USEC != period_ns) {
qpnp_lpg_calc_period(LVL_NSEC, period_ns, chip);
qpnp_lpg_save_period(chip);
pwm->period = period_ns;
chip->pwm_config.pwm_period = period_ns / NSEC_PER_USEC;
+ chip->pwm_config.update_period = true;
}
rc = _pwm_config(chip, LVL_NSEC, duty_ns, period_ns);
@@ -1619,6 +1626,7 @@ int pwm_config_us(struct pwm_device *pwm, int duty_us, int period_us)
spin_lock_irqsave(&chip->lpg_lock, flags);
+ chip->pwm_config.update_period = false;
if (chip->pwm_config.pwm_period != period_us) {
qpnp_lpg_calc_period(LVL_USEC, period_us, chip);
qpnp_lpg_save_period(chip);
@@ -1628,6 +1636,7 @@ int pwm_config_us(struct pwm_device *pwm, int duty_us, int period_us)
pwm->period = 0;
else
pwm->period = (unsigned int)period_us * NSEC_PER_USEC;
+ chip->pwm_config.update_period = true;
}
rc = _pwm_config(chip, LVL_USEC, duty_us, period_us);
@@ -1734,6 +1743,7 @@ static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node,
qpnp_lpg_calc_period(LVL_USEC, period, chip);
qpnp_lpg_save_period(chip);
chip->pwm_config.pwm_period = period;
+ chip->pwm_config.update_period = true;
rc = _pwm_config(chip, LVL_USEC, chip->pwm_config.pwm_duty, period);
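
The hunks above add an update_period flag so that _pwm_config() touches the LPG period and control registers only when the caller actually changed the period; a duty-only update now skips that path. A minimal sketch of the same "reprogram only on change" idea, using hypothetical names rather than the qpnp driver's own:

#include <linux/types.h>

/* Illustrative sketch only; the register helpers are hypothetical. */
static void write_duty_registers(unsigned int duty_us) { /* h/w write */ }
static void write_period_registers(unsigned int period_us) { /* h/w write */ }

struct my_pwm_state {
	unsigned int period_us;		/* last programmed period */
	bool update_period;		/* period registers need a rewrite */
};

static void my_pwm_config(struct my_pwm_state *st, unsigned int duty_us,
			  unsigned int period_us)
{
	st->update_period = false;
	if (st->period_us != period_us) {
		st->period_us = period_us;
		st->update_period = true;
	}

	write_duty_registers(duty_us);			/* always refreshed */
	if (st->update_period)
		write_period_registers(period_us);	/* only on a real change */
}
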
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index adf621430b41..188920367e84 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2361,6 +2361,14 @@ static void regulator_disable_work(struct work_struct *work)
count = rdev->deferred_disables;
rdev->deferred_disables = 0;
+ /*
+ * Workqueue functions queue the new work instance while the previous
+ * work instance is being processed. Cancel the queued work instance
+ * as the work instance under processing does the job of the queued
+ * work instance.
+ */
+ cancel_delayed_work(&rdev->disable_work);
+
for (i = 0; i < count; i++) {
ret = _regulator_disable(rdev);
if (ret != 0)
@@ -2404,10 +2412,10 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
mutex_lock(&rdev->mutex);
rdev->deferred_disables++;
+ mod_delayed_work(system_power_efficient_wq, &rdev->disable_work,
+ msecs_to_jiffies(ms));
mutex_unlock(&rdev->mutex);
- queue_delayed_work(system_power_efficient_wq, &rdev->disable_work,
- msecs_to_jiffies(ms));
return 0;
}
EXPORT_SYMBOL_GPL(regulator_disable_deferred);
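
The regulator change above does two related things: the work handler cancels any instance that was queued while it was running, since a single pass drains every deferred disable, and the request side switches from queue_delayed_work(), which keeps the old expiry when the work is already pending, to mod_delayed_work(), which re-arms the timer with the new delay. A small self-contained sketch of that pattern, with a hypothetical work item:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void cleanup_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(cleanup_work, cleanup_fn);	/* hypothetical */

static void request_cleanup(int ms)
{
	/* Re-arms the timer with the new delay even if already pending. */
	mod_delayed_work(system_power_efficient_wq, &cleanup_work,
			 msecs_to_jiffies(ms));
}

static void cleanup_fn(struct work_struct *work)
{
	/*
	 * One run handles everything accumulated so far, so drop any
	 * instance that was queued while this one was executing.
	 */
	cancel_delayed_work(&cleanup_work);
	/* ... drain the accumulated requests here ... */
}
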
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 59ced8864b2f..0e6aaef9a038 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3563,12 +3563,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
+ elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
+ elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f5aeda8f014f..38e90d9c2ced 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5887,18 +5887,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
+ phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
+ phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
+ phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
+ phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
+ phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}
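
Both lpfc hunks clear a pointer right after freeing it (the iocb context buffers in lpfc_els_free_iocb() and the bitmask/ID arrays in the unwind labels above), so a later release path cannot double-free or dereference stale memory. The idiom in isolation, with a hypothetical structure:

#include <linux/slab.h>

struct res_ids {
	u16 *ids;
	unsigned long *bmask;
};

static void res_ids_release(struct res_ids *r)
{
	/* kfree(NULL) is a no-op, so repeated release calls stay harmless... */
	kfree(r->ids);
	r->ids = NULL;		/* ...as long as the pointer is cleared here */

	kfree(r->bmask);
	r->bmask = NULL;
}
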
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0e59731f95ad..1f6a3b86965f 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2466,6 +2466,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
+ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+ pkt->handle == QLA_TGT_SKIP_HANDLE)
+ return;
+
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(ha, sp, res);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f57d96984ae4..e6faa0b050d1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2865,7 +2865,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = QLA_TGT_SKIP_HANDLE;
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9b3af788376c..8aa202faafb5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2497,7 +2497,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdp->broken_fua) {
sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
sdkp->DPOFUA = 0;
- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+ !sdkp->device->use_16_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a2f2cc0c2c51..c23023f43d30 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1483,7 +1483,7 @@ start:
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- queue_work(hba->clk_gating.ungating_workq,
+ queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
@@ -1751,7 +1751,8 @@ static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
struct ufs_hba *hba = container_of(timer, struct ufs_hba,
clk_gating.gate_hrtimer);
- schedule_work(&hba->clk_gating.gate_work);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work);
return HRTIMER_NORESTART;
}
@@ -1759,7 +1760,7 @@ static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
struct ufs_clk_gating *gating = &hba->clk_gating;
- char wq_name[sizeof("ufs_clk_ungating_00")];
+ char wq_name[sizeof("ufs_clk_gating_00")];
hba->clk_gating.state = CLKS_ON;
@@ -1788,9 +1789,10 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_ungating_%d",
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
hba->host->host_no);
- hba->clk_gating.ungating_workq = create_singlethread_workqueue(wq_name);
+ hba->clk_gating.clk_gating_workq =
+ create_singlethread_workqueue(wq_name);
gating->is_enabled = true;
@@ -1854,7 +1856,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- destroy_workqueue(hba->clk_gating.ungating_workq);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
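
The ufshcd hunks rename the ungate workqueue to clk_gating_workq and queue the gate work there as well, instead of schedule_work() on the system workqueue, so both gating works now share one single-threaded queue and never run concurrently. A reduced sketch of that setup, with hypothetical work handlers:

#include <linux/workqueue.h>

static void gate_fn(struct work_struct *w) { /* hypothetical gate handler */ }
static void ungate_fn(struct work_struct *w) { /* hypothetical ungate handler */ }

static DECLARE_WORK(gate_work, gate_fn);
static DECLARE_WORK(ungate_work, ungate_fn);
static struct workqueue_struct *clk_gating_wq;

static int init_clk_gating_wq(void)
{
	/* One ordered, single-threaded queue for both works. */
	clk_gating_wq = create_singlethread_workqueue("example_clk_gating_0");
	if (!clk_gating_wq)
		return -ENOMEM;

	/* Both paths queue here instead of using schedule_work(). */
	queue_work(clk_gating_wq, &gate_work);
	queue_work(clk_gating_wq, &ungate_work);
	return 0;
}
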
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index d66205ff9f5d..da3ad78d3405 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -431,7 +431,7 @@ struct ufs_clk_gating {
struct device_attribute enable_attr;
bool is_enabled;
int active_reqs;
- struct workqueue_struct *ungating_workq;
+ struct workqueue_struct *clk_gating_workq;
};
/* Hibern8 state */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7dbbb29d24c6..03a2aadf0d3c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -533,7 +533,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
{
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ unsigned long flags;
int req_size;
+ int ret;
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -561,8 +563,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
req_size = sizeof(cmd->req.cmd);
}
- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ if (ret == -EIO) {
+ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ spin_lock_irqsave(&req_vq->vq_lock, flags);
+ virtscsi_complete_cmd(vscsi, cmd);
+ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+ } else if (ret != 0) {
return SCSI_MLQUEUE_HOST_BUSY;
+ }
return 0;
}
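
The virtio_scsi hunk splits the error handling on submission: -EIO from the kick means the queue is unusable, so the command is completed immediately with a bad-target response under the virtqueue lock, while any other failure still returns SCSI_MLQUEUE_HOST_BUSY so the midlayer retries. A rough sketch of that split in a generic queuecommand path; submit_to_ring() is a hypothetical stand-in for the transport submit:

#include <linux/errno.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

int submit_to_ring(struct scsi_cmnd *sc);	/* hypothetical */

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	int ret = submit_to_ring(sc);

	if (ret == -EIO) {
		/* Transport is dead: fail the command upward, do not retry. */
		sc->result = DID_BAD_TARGET << 16;
		sc->scsi_done(sc);
		return 0;
	}
	if (ret)
		return SCSI_MLQUEUE_HOST_BUSY;	/* transient: midlayer retries */

	return 0;
}
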
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 6358d1256bb1..fccbdf313e08 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_QCOM_SCM_QCPE) += qcom/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0c4414d27eeb..907960cfa9d5 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -379,6 +379,10 @@ config QCOM_SCM
bool "Secure Channel Manager (SCM) support"
default n
+config QCOM_SCM_QCPE
+ bool "Para-Virtualized Secure Channel Manager (SCM) support over QCPE"
+ default n
+
menuconfig QCOM_SCM_XPU
bool "Qualcomm XPU configuration driver"
depends on QCOM_SCM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 5eeede23333d..0bf54bedd6ea 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -62,6 +62,7 @@ CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
obj-$(CONFIG_QCOM_SCM_ERRATA) += scm-errata.o
obj-$(CONFIG_QCOM_SCM) += scm.o scm-boot.o
+obj-$(CONFIG_QCOM_SCM_QCPE) += scm_qcpe.o
obj-$(CONFIG_QCOM_SCM_XPU) += scm-xpu.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_MEMORY_DUMP) += memory_dump.o
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 21be894414bc..bf815cb68f90 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) "icnss: " fmt
#include <asm/dma-iommu.h>
+#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/iommu.h>
#include <linux/export.h>
@@ -83,34 +84,58 @@ module_param(qmi_timeout, ulong, 0600);
} while (0)
#define icnss_pr_err(_fmt, ...) do { \
- pr_err(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("ERR: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_ERR, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
#define icnss_pr_warn(_fmt, ...) do { \
- pr_warn(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("WRN: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_WARNING, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
#define icnss_pr_info(_fmt, ...) do { \
- pr_info(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("INF: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_INFO, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define icnss_pr_dbg(_fmt, ...) do { \
- pr_debug(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("DBG: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_string(pr_fmt(_fmt), ##__VA_ARGS__); \
} while (0)
#define icnss_pr_vdbg(_fmt, ...) do { \
- pr_debug(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_long_string("DBG: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string(pr_fmt(_fmt), ##__VA_ARGS__); \
} while (0)
+#elif defined(DEBUG)
+#define icnss_pr_dbg(_fmt, ...) do { \
+ printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do { \
+ printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+#else
+#define icnss_pr_dbg(_fmt, ...) do { \
+ no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do { \
+ no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+#endif
#ifdef CONFIG_ICNSS_DEBUG
#define ICNSS_ASSERT(_condition) do { \
@@ -365,6 +390,7 @@ struct icnss_stats {
uint32_t vbatt_req;
uint32_t vbatt_resp;
uint32_t vbatt_req_err;
+ u32 rejuvenate_ind;
uint32_t rejuvenate_ack_req;
uint32_t rejuvenate_ack_resp;
uint32_t rejuvenate_ack_err;
@@ -456,6 +482,10 @@ static struct icnss_priv {
struct icnss_wlan_mac_addr wlan_mac_addr;
bool bypass_s1_smmu;
struct mutex dev_lock;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system;
+ u16 line_number;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
} *penv;
#ifdef CONFIG_ICNSS_DEBUG
@@ -1691,6 +1721,60 @@ out:
return ret;
}
+static int icnss_decode_rejuvenate_ind(void *msg, unsigned int msg_len)
+{
+ struct msg_desc ind_desc;
+ struct wlfw_rejuvenate_ind_msg_v01 ind_msg;
+ int ret = 0;
+
+ if (!penv || !penv->wlfw_clnt) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ memset(&ind_msg, 0, sizeof(ind_msg));
+
+ ind_desc.msg_id = QMI_WLFW_REJUVENATE_IND_V01;
+ ind_desc.max_msg_len = WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN;
+ ind_desc.ei_array = wlfw_rejuvenate_ind_msg_v01_ei;
+
+ ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (ret < 0) {
+ icnss_pr_err("Failed to decode rejuvenate ind message: ret %d, msg_len %u\n",
+ ret, msg_len);
+ goto out;
+ }
+
+ if (ind_msg.cause_for_rejuvenation_valid)
+ penv->cause_for_rejuvenation = ind_msg.cause_for_rejuvenation;
+ else
+ penv->cause_for_rejuvenation = 0;
+ if (ind_msg.requesting_sub_system_valid)
+ penv->requesting_sub_system = ind_msg.requesting_sub_system;
+ else
+ penv->requesting_sub_system = 0;
+ if (ind_msg.line_number_valid)
+ penv->line_number = ind_msg.line_number;
+ else
+ penv->line_number = 0;
+ if (ind_msg.function_name_valid)
+ memcpy(penv->function_name, ind_msg.function_name,
+ QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+ else
+ memset(penv->function_name, 0,
+ QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+
+ icnss_pr_info("Cause for rejuvenation: 0x%x, requesting sub-system: 0x%x, line number: %u, function name: %s\n",
+ penv->cause_for_rejuvenation,
+ penv->requesting_sub_system,
+ penv->line_number,
+ penv->function_name);
+
+ penv->stats.rejuvenate_ind++;
+out:
+ return ret;
+}
+
static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv)
{
int ret;
@@ -1884,6 +1968,7 @@ static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
msg_id, penv->state);
icnss_ignore_qmi_timeout(true);
+ icnss_decode_rejuvenate_ind(msg, msg_len);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
if (event_data == NULL)
return;
@@ -2518,21 +2603,22 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
if (event_data == NULL)
return notifier_from_errno(-ENOMEM);
+ event_data->crashed = true;
+
if (state == NULL) {
- event_data->crashed = true;
priv->stats.recovery.root_pd_crash++;
goto event_post;
}
switch (*state) {
case ROOT_PD_WDOG_BITE:
- event_data->crashed = true;
event_data->wdog_bite = true;
priv->stats.recovery.root_pd_crash++;
break;
case ROOT_PD_SHUTDOWN:
cause = ICNSS_ROOT_PD_SHUTDOWN;
priv->stats.recovery.root_pd_shutdown++;
+ event_data->crashed = false;
break;
case USER_PD_STATE_CHANGE:
if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
@@ -2544,7 +2630,6 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
}
break;
default:
- event_data->crashed = true;
priv->stats.recovery.root_pd_crash++;
break;
}
@@ -3786,6 +3871,26 @@ static int icnss_stats_show_capability(struct seq_file *s,
return 0;
}
+static int icnss_stats_show_rejuvenate_info(struct seq_file *s,
+ struct icnss_priv *priv)
+{
+ if (priv->stats.rejuvenate_ind) {
+ seq_puts(s, "\n<---------------- Rejuvenate Info ----------------->\n");
+ seq_printf(s, "Number of Rejuvenations: %u\n",
+ priv->stats.rejuvenate_ind);
+ seq_printf(s, "Cause for Rejuvenation: 0x%x\n",
+ priv->cause_for_rejuvenation);
+ seq_printf(s, "Requesting Sub-System: 0x%x\n",
+ priv->requesting_sub_system);
+ seq_printf(s, "Line Number: %u\n",
+ priv->line_number);
+ seq_printf(s, "Function Name: %s\n",
+ priv->function_name);
+ }
+
+ return 0;
+}
+
static int icnss_stats_show_events(struct seq_file *s, struct icnss_priv *priv)
{
int i;
@@ -3851,6 +3956,7 @@ static int icnss_stats_show(struct seq_file *s, void *data)
ICNSS_STATS_DUMP(s, priv, vbatt_req);
ICNSS_STATS_DUMP(s, priv, vbatt_resp);
ICNSS_STATS_DUMP(s, priv, vbatt_req_err);
+ ICNSS_STATS_DUMP(s, priv, rejuvenate_ind);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
@@ -3875,6 +3981,8 @@ static int icnss_stats_show(struct seq_file *s, void *data)
icnss_stats_show_capability(s, priv);
+ icnss_stats_show_rejuvenate_info(s, priv);
+
icnss_stats_show_events(s, priv);
icnss_stats_show_state(s, priv);
@@ -4215,6 +4323,9 @@ static int icnss_probe(struct platform_device *pdev)
int i;
struct device *dev = &pdev->dev;
struct icnss_priv *priv;
+ const __be32 *addrp;
+ u64 prop_size = 0;
+ struct device_node *np;
if (penv) {
icnss_pr_err("Driver is already initialized\n");
@@ -4286,24 +4397,53 @@ static int icnss_probe(struct platform_device *pdev)
}
}
- ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
- &priv->msa_mem_size);
+ np = of_parse_phandle(dev->of_node,
+ "qcom,wlan-msa-fixed-region", 0);
+ if (np) {
+ addrp = of_get_address(np, 0, &prop_size, NULL);
+ if (!addrp) {
+ icnss_pr_err("Failed to get assigned-addresses or property\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->msa_pa = of_translate_address(np, addrp);
+ if (priv->msa_pa == OF_BAD_ADDR) {
+ icnss_pr_err("Failed to translate MSA PA from device-tree\n");
+ ret = -EINVAL;
+ goto out;
+ }
- if (ret || priv->msa_mem_size == 0) {
- icnss_pr_err("Fail to get MSA Memory Size: %u, ret: %d\n",
- priv->msa_mem_size, ret);
- goto out;
- }
+ priv->msa_va = memremap(priv->msa_pa,
+ (unsigned long)prop_size, MEMREMAP_WT);
+ if (!priv->msa_va) {
+ icnss_pr_err("MSA PA memremap failed: phy addr: %pa\n",
+ &priv->msa_pa);
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->msa_mem_size = prop_size;
+ } else {
+ ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
+ &priv->msa_mem_size);
+ if (ret || priv->msa_mem_size == 0) {
+ icnss_pr_err("Fail to get MSA Memory Size: %u ret: %d\n",
+ priv->msa_mem_size, ret);
+ goto out;
+ }
- priv->msa_va = dmam_alloc_coherent(&pdev->dev, priv->msa_mem_size,
- &priv->msa_pa, GFP_KERNEL);
- if (!priv->msa_va) {
- icnss_pr_err("DMA alloc failed for MSA\n");
- ret = -ENOMEM;
- goto out;
+ priv->msa_va = dmam_alloc_coherent(&pdev->dev,
+ priv->msa_mem_size, &priv->msa_pa, GFP_KERNEL);
+
+ if (!priv->msa_va) {
+ icnss_pr_err("DMA alloc failed for MSA\n");
+ ret = -ENOMEM;
+ goto out;
+ }
}
- icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p\n", &priv->msa_pa,
- priv->msa_va);
+
+ icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p MSA Memory Size: 0x%x\n",
+ &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smmu_iova_base");
diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c
index e5f6104bd7de..adf4078818a5 100644
--- a/drivers/soc/qcom/ipc_router_mhi_xprt.c
+++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c
@@ -132,12 +132,11 @@ struct ipc_router_mhi_xprt {
struct ipc_router_mhi_xprt_work {
struct ipc_router_mhi_xprt *mhi_xprtp;
enum MHI_CLIENT_CHANNEL chan_id;
- struct work_struct work;
};
static void mhi_xprt_read_data(struct work_struct *work);
-static void mhi_xprt_enable_event(struct work_struct *work);
-static void mhi_xprt_disable_event(struct work_struct *work);
+static void mhi_xprt_enable_event(struct ipc_router_mhi_xprt_work *xprt_work);
+static void mhi_xprt_disable_event(struct ipc_router_mhi_xprt_work *xprt_work);
/**
* ipc_router_mhi_xprt_config - Config. Info. of each MHI XPRT
@@ -574,8 +573,6 @@ static int ipc_router_mhi_close(struct msm_ipc_router_xprt *xprt)
mhi_xprtp->ch_hndl.in_chan_enabled = false;
mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
flush_workqueue(mhi_xprtp->wq);
- mhi_close_channel(mhi_xprtp->ch_hndl.in_handle);
- mhi_close_channel(mhi_xprtp->ch_hndl.out_handle);
return 0;
}
@@ -600,10 +597,8 @@ static void mhi_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
*
* This work is scheduled when the MHI link to the peripheral is up.
*/
-static void mhi_xprt_enable_event(struct work_struct *work)
+static void mhi_xprt_enable_event(struct ipc_router_mhi_xprt_work *xprt_work)
{
- struct ipc_router_mhi_xprt_work *xprt_work =
- container_of(work, struct ipc_router_mhi_xprt_work, work);
struct ipc_router_mhi_xprt *mhi_xprtp = xprt_work->mhi_xprtp;
int rc;
bool notify = false;
@@ -613,7 +608,7 @@ static void mhi_xprt_enable_event(struct work_struct *work)
if (rc) {
IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n",
__func__, mhi_xprtp->ch_hndl.out_chan_id, rc);
- goto out_enable_event;
+ return;
}
mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
mhi_xprtp->ch_hndl.out_chan_enabled = true;
@@ -625,7 +620,7 @@ static void mhi_xprt_enable_event(struct work_struct *work)
if (rc) {
IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n",
__func__, mhi_xprtp->ch_hndl.in_chan_id, rc);
- goto out_enable_event;
+ return;
}
mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
mhi_xprtp->ch_hndl.in_chan_enabled = true;
@@ -643,11 +638,11 @@ static void mhi_xprt_enable_event(struct work_struct *work)
}
if (xprt_work->chan_id != mhi_xprtp->ch_hndl.in_chan_id)
- goto out_enable_event;
+ return;
rc = mhi_xprt_queue_in_buffers(mhi_xprtp, mhi_xprtp->ch_hndl.num_trbs);
if (rc > 0)
- goto out_enable_event;
+ return;
IPC_RTR_ERR("%s: Could not queue one TRB atleast\n", __func__);
mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
@@ -656,9 +651,6 @@ static void mhi_xprt_enable_event(struct work_struct *work)
if (notify)
msm_ipc_router_xprt_notify(&mhi_xprtp->xprt,
IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
- mhi_close_channel(mhi_xprtp->ch_hndl.in_handle);
-out_enable_event:
- kfree(xprt_work);
}
/**
@@ -667,10 +659,8 @@ out_enable_event:
*
* This work is scheduled when the MHI link to the peripheral is down.
*/
-static void mhi_xprt_disable_event(struct work_struct *work)
+static void mhi_xprt_disable_event(struct ipc_router_mhi_xprt_work *xprt_work)
{
- struct ipc_router_mhi_xprt_work *xprt_work =
- container_of(work, struct ipc_router_mhi_xprt_work, work);
struct ipc_router_mhi_xprt *mhi_xprtp = xprt_work->mhi_xprtp;
bool notify = false;
@@ -681,7 +671,6 @@ static void mhi_xprt_disable_event(struct work_struct *work)
mhi_xprtp->ch_hndl.out_chan_enabled = false;
mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
wake_up(&mhi_xprtp->write_wait_q);
- mhi_close_channel(mhi_xprtp->ch_hndl.out_handle);
} else if (xprt_work->chan_id == mhi_xprtp->ch_hndl.in_chan_id) {
mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
notify = mhi_xprtp->ch_hndl.out_chan_enabled &&
@@ -691,7 +680,6 @@ static void mhi_xprt_disable_event(struct work_struct *work)
/* Queue a read work to remove any partially read packets */
queue_work(mhi_xprtp->wq, &mhi_xprtp->read_work);
flush_workqueue(mhi_xprtp->wq);
- mhi_close_channel(mhi_xprtp->ch_hndl.in_handle);
}
if (notify) {
@@ -702,7 +690,6 @@ static void mhi_xprt_disable_event(struct work_struct *work)
__func__, mhi_xprtp->xprt.name);
wait_for_completion(&mhi_xprtp->sft_close_complete);
}
- kfree(xprt_work);
}
/**
@@ -743,7 +730,7 @@ static void mhi_xprt_xfer_event(struct mhi_cb_info *cb_info)
static void ipc_router_mhi_xprt_cb(struct mhi_cb_info *cb_info)
{
struct ipc_router_mhi_xprt *mhi_xprtp;
- struct ipc_router_mhi_xprt_work *xprt_work;
+ struct ipc_router_mhi_xprt_work xprt_work;
if (cb_info->result == NULL) {
IPC_RTR_ERR("%s: Result not available in cb_info\n", __func__);
@@ -751,23 +738,16 @@ static void ipc_router_mhi_xprt_cb(struct mhi_cb_info *cb_info)
}
mhi_xprtp = (struct ipc_router_mhi_xprt *)(cb_info->result->user_data);
+ xprt_work.mhi_xprtp = mhi_xprtp;
+ xprt_work.chan_id = cb_info->chan;
switch (cb_info->cb_reason) {
- case MHI_CB_MHI_ENABLED:
+ case MHI_CB_MHI_SHUTDOWN:
+ case MHI_CB_SYS_ERROR:
case MHI_CB_MHI_DISABLED:
- xprt_work = kmalloc(sizeof(*xprt_work), GFP_KERNEL);
- if (!xprt_work) {
- IPC_RTR_ERR("%s: Couldn't handle %d event on %s\n",
- __func__, cb_info->cb_reason,
- mhi_xprtp->xprt_name);
- return;
- }
- xprt_work->mhi_xprtp = mhi_xprtp;
- xprt_work->chan_id = cb_info->chan;
- if (cb_info->cb_reason == MHI_CB_MHI_ENABLED)
- INIT_WORK(&xprt_work->work, mhi_xprt_enable_event);
- else
- INIT_WORK(&xprt_work->work, mhi_xprt_disable_event);
- queue_work(mhi_xprtp->wq, &xprt_work->work);
+ mhi_xprt_disable_event(&xprt_work);
+ break;
+ case MHI_CB_MHI_ENABLED:
+ mhi_xprt_enable_event(&xprt_work);
break;
case MHI_CB_XFER:
mhi_xprt_xfer_event(cb_info);
@@ -788,22 +768,37 @@ static void ipc_router_mhi_xprt_cb(struct mhi_cb_info *cb_info)
* This function is called when a new XPRT is added.
*/
static int ipc_router_mhi_driver_register(
- struct ipc_router_mhi_xprt *mhi_xprtp)
+ struct ipc_router_mhi_xprt *mhi_xprtp, struct device *dev)
{
- int rc_status;
-
- rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.out_handle, NULL);
- if (rc_status) {
+ int rc;
+ const char *node_name = "qcom,mhi";
+ struct mhi_client_info_t *mhi_info;
+
+ if (!mhi_is_device_ready(dev, node_name))
+ return -EPROBE_DEFER;
+
+ mhi_info = &mhi_xprtp->ch_hndl.out_clnt_info;
+ mhi_info->chan = mhi_xprtp->ch_hndl.out_chan_id;
+ mhi_info->dev = dev;
+ mhi_info->node_name = node_name;
+ mhi_info->user_data = mhi_xprtp;
+ rc = mhi_register_channel(&mhi_xprtp->ch_hndl.out_handle, mhi_info);
+ if (rc) {
IPC_RTR_ERR("%s: Error %d registering out_chan for %s\n",
- __func__, rc_status, mhi_xprtp->xprt_name);
+ __func__, rc, mhi_xprtp->xprt_name);
return -EFAULT;
}
- rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.in_handle, NULL);
- if (rc_status) {
+ mhi_info = &mhi_xprtp->ch_hndl.in_clnt_info;
+ mhi_info->chan = mhi_xprtp->ch_hndl.in_chan_id;
+ mhi_info->dev = dev;
+ mhi_info->node_name = node_name;
+ mhi_info->user_data = mhi_xprtp;
+ rc = mhi_register_channel(&mhi_xprtp->ch_hndl.in_handle, mhi_info);
+ if (rc) {
mhi_deregister_channel(mhi_xprtp->ch_hndl.out_handle);
IPC_RTR_ERR("%s: Error %d registering in_chan for %s\n",
- __func__, rc_status, mhi_xprtp->xprt_name);
+ __func__, rc, mhi_xprtp->xprt_name);
return -EFAULT;
}
return 0;
@@ -820,7 +815,8 @@ static int ipc_router_mhi_driver_register(
* the MHI XPRT configurations from device tree.
*/
static int ipc_router_mhi_config_init(
- struct ipc_router_mhi_xprt_config *mhi_xprt_config)
+ struct ipc_router_mhi_xprt_config *mhi_xprt_config,
+ struct device *dev)
{
struct ipc_router_mhi_xprt *mhi_xprtp;
char wq_name[XPRT_NAME_LEN];
@@ -879,7 +875,7 @@ static int ipc_router_mhi_config_init(
INIT_LIST_HEAD(&mhi_xprtp->rx_addr_map_list);
spin_lock_init(&mhi_xprtp->rx_addr_map_list_lock);
- rc = ipc_router_mhi_driver_register(mhi_xprtp);
+ rc = ipc_router_mhi_driver_register(mhi_xprtp, dev);
return rc;
}
@@ -963,7 +959,7 @@ static int ipc_router_mhi_xprt_probe(struct platform_device *pdev)
return rc;
}
- rc = ipc_router_mhi_config_init(&mhi_xprt_config);
+ rc = ipc_router_mhi_config_init(&mhi_xprt_config, &pdev->dev);
if (rc) {
IPC_RTR_ERR("%s: init failed\n", __func__);
return rc;
diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c
new file mode 100644
index 000000000000..54a978157bda
--- /dev/null
+++ b/drivers/soc/qcom/scm_qcpe.c
@@ -0,0 +1,1137 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/compiler.h>
+
+#include <soc/qcom/scm.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/scm.h>
+
+#include <uapi/linux/habmm.h>
+
+#define SCM_ENOMEM (-5)
+#define SCM_EOPNOTSUPP (-4)
+#define SCM_EINVAL_ADDR (-3)
+#define SCM_EINVAL_ARG (-2)
+#define SCM_ERROR (-1)
+#define SCM_INTERRUPTED (1)
+#define SCM_EBUSY (-55)
+#define SCM_V2_EBUSY (-12)
+
+static DEFINE_MUTEX(scm_lock);
+
+/*
+ * MSM8996 V2 requires a lock to protect against
+ * concurrent accesses between the limits management
+ * driver and the clock controller
+ */
+DEFINE_MUTEX(scm_lmh_lock);
+
+#define SCM_EBUSY_WAIT_MS 30
+#define SCM_EBUSY_MAX_RETRY 67
+
+#define N_EXT_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define SMC_ATOMIC_SYSCALL 31
+#define N_REGISTER_ARGS (MAX_SCM_ARGS - N_EXT_SCM_ARGS + 1)
+#define SMC64_MASK 0x40000000
+#define SMC_ATOMIC_MASK 0x80000000
+#define IS_CALL_AVAIL_CMD 1
+
+#define SCM_BUF_LEN(__cmd_size, __resp_size) ({ \
+ size_t x = __cmd_size + __resp_size; \
+ size_t y = sizeof(struct scm_command) + sizeof(struct scm_response); \
+ size_t result; \
+ if (x < __cmd_size || (x + y) < x) \
+ result = 0; \
+ else \
+ result = x + y; \
+ result; \
+ })
+/**
+ * struct scm_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ * ------------------- <--- struct scm_command
+ * | command header |
+ * ------------------- <--- scm_get_command_buffer()
+ * | command buffer |
+ * ------------------- <--- struct scm_response and
+ * | response header | scm_command_to_response()
+ * ------------------- <--- scm_get_response_buffer()
+ * | response buffer |
+ * -------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_command {
+ u32 len;
+ u32 buf_offset;
+ u32 resp_hdr_offset;
+ u32 id;
+ u32 buf[0];
+};
+
+/**
+ * struct scm_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_response {
+ u32 len;
+ u32 buf_offset;
+ u32 is_complete;
+};
+
+#ifdef CONFIG_ARM64
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+#define R5_STR "x5"
+#define R6_STR "x6"
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_inv_range(x, y)
+#define outer_flush_range(x, y)
+
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#else
+
+#define R0_STR "r0"
+#define R1_STR "r1"
+#define R2_STR "r2"
+#define R3_STR "r3"
+#define R4_STR "r4"
+#define R5_STR "r5"
+#define R6_STR "r6"
+
+#endif
+
+/**
+ * scm_command_to_response() - Get a pointer to a scm_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_response *scm_command_to_response(
+ const struct scm_command *cmd)
+{
+ return (void *)cmd + cmd->resp_hdr_offset;
+}
+
+/**
+ * scm_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_get_command_buffer(const struct scm_command *cmd)
+{
+ return (void *)cmd->buf;
+}
+
+/**
+ * scm_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_get_response_buffer(const struct scm_response *rsp)
+{
+ return (void *)rsp + rsp->buf_offset;
+}
+
+static int scm_remap_error(int err)
+{
+ switch (err) {
+ case SCM_ERROR:
+ return -EIO;
+ case SCM_EINVAL_ADDR:
+ case SCM_EINVAL_ARG:
+ return -EINVAL;
+ case SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case SCM_ENOMEM:
+ return -ENOMEM;
+ case SCM_EBUSY:
+ return SCM_EBUSY;
+ case SCM_V2_EBUSY:
+ return SCM_V2_EBUSY;
+ }
+ return -EINVAL;
+}
+
+static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
+{
+ static bool opened;
+ static u32 handle;
+ u32 ret;
+ u32 size_bytes;
+
+ struct smc_params_s {
+ uint64_t x0;
+ uint64_t x1;
+ uint64_t x2;
+ uint64_t x3;
+ uint64_t x4;
+ uint64_t x5;
+ uint64_t sid;
+ } smc_params;
+
+ pr_info("scm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx",
+ fn_id, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->args[3], desc->args[4],
+ desc->args[5], desc->args[6]);
+
+ if (!opened) {
+ ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0);
+ if (ret != HAB_OK) {
+ pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d",
+ ret);
+ return ret;
+ }
+ opened = true;
+ }
+
+ smc_params.x0 = fn_id | 0x40000000; /* SMC64_MASK */
+ smc_params.x1 = desc->arginfo;
+ smc_params.x2 = desc->args[0];
+ smc_params.x3 = desc->args[1];
+ smc_params.x4 = desc->args[2];
+ smc_params.x5 = desc->x5;
+ smc_params.sid = 0;
+
+ ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0);
+ if (ret != HAB_OK)
+ return ret;
+
+ size_bytes = sizeof(smc_params);
+
+ ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0);
+ if (ret != HAB_OK)
+ return ret;
+
+ desc->ret[0] = smc_params.x1;
+ desc->ret[1] = smc_params.x2;
+ desc->ret[2] = smc_params.x3;
+
+ pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx",
+ desc->ret[0], desc->ret[1], desc->ret[2]);
+
+ return 0;
+}
+
+static u32 smc(u32 cmd_addr)
+{
+ int context_id;
+ int ret;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0") = 1;
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = cmd_addr;
+
+ x0 = r0;
+ desc.arginfo = r1;
+ desc.args[0] = r2;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ return 0;
+}
+
+static int __scm_call(const struct scm_command *cmd)
+{
+ int ret;
+ u32 cmd_addr = virt_to_phys(cmd);
+
+ /*
+ * Flush the command buffer so that the secure world sees
+ * the correct data.
+ */
+ __cpuc_flush_dcache_area((void *)cmd, cmd->len);
+ outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+
+ ret = smc(cmd_addr);
+ if (ret < 0) {
+ if (ret != SCM_EBUSY)
+ pr_err("scm_call failed with error code %d\n", ret);
+ ret = scm_remap_error(ret);
+ }
+ return ret;
+}
+
+#ifndef CONFIG_ARM64
+static void scm_inv_range(unsigned long start, unsigned long end)
+{
+ u32 cacheline_size, ctr;
+
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
+ start = round_down(start, cacheline_size);
+ end = round_up(end, cacheline_size);
+ outer_inv_range(start, end);
+ while (start < end) {
+ asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
+ : "memory");
+ start += cacheline_size;
+ }
+ mb(); /* Make sure memory is visible to TZ */
+ isb();
+}
+#else
+
+static void scm_inv_range(unsigned long start, unsigned long end)
+{
+ dmac_inv_range((void *)start, (void *)end);
+}
+#endif
+
+/**
+ * scm_call_common() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ * @scm_buf: internal scm structure used for passing data
+ * @scm_buf_len: length of the internal scm structure
+ *
+ * Core function to scm call. Initializes the given cmd structure with
+ * appropriate values and makes the actual scm call. Validation of cmd
+ * pointer and length must occur in the calling function.
+ *
+ * Returns the appropriate error code from the scm call
+ */
+
+static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ struct scm_command *scm_buf,
+ size_t scm_buf_length)
+{
+ int ret;
+ struct scm_response *rsp;
+ unsigned long start, end;
+
+ scm_buf->len = scm_buf_length;
+ scm_buf->buf_offset = offsetof(struct scm_command, buf);
+ scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len;
+ scm_buf->id = (svc_id << 10) | cmd_id;
+
+ if (cmd_buf)
+ memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len);
+
+ mutex_lock(&scm_lock);
+ ret = __scm_call(scm_buf);
+ mutex_unlock(&scm_lock);
+ if (ret)
+ return ret;
+
+ rsp = scm_command_to_response(scm_buf);
+ start = (unsigned long)rsp;
+
+ do {
+ scm_inv_range(start, start + sizeof(*rsp));
+ } while (!rsp->is_complete);
+
+ end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
+ scm_inv_range(start, end);
+
+ if (resp_buf)
+ memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
+
+ return ret;
+}
+
+/*
+ * Sometimes the secure world may be busy waiting for a particular resource.
+ * In those situations, it is expected that the secure world returns a special
+ * error code (SCM_EBUSY). Retry any scm_call that fails with this error code,
+ * but with a timeout in place. Also, don't move this into scm_call_common,
+ * since we want the first attempt to be the "fastpath".
+ */
+static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ struct scm_command *cmd,
+ size_t len)
+{
+ int ret, retry_count = 0;
+
+ do {
+ ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
+ resp_buf, resp_len, cmd, len);
+ if (ret == SCM_EBUSY)
+ msleep(SCM_EBUSY_WAIT_MS);
+ if (retry_count == 33)
+ pr_warn("scm: secure world has been busy for 1 second!\n");
+ } while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+
+ if (ret == SCM_EBUSY)
+ pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
+
+ return ret;
+}
+
+/**
+ * scm_call_noalloc - Send an SCM command
+ *
+ * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for
+ * scm internal structures. The buffer should be allocated with
+ * DEFINE_SCM_BUFFER to account for the proper alignment and size.
+ */
+int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ void *scm_buf, size_t scm_buf_len)
+{
+ int ret;
+ size_t len = SCM_BUF_LEN(cmd_len, resp_len);
+
+ if (len == 0)
+ return -EINVAL;
+
+ if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE))
+ return -EINVAL;
+
+ memset(scm_buf, 0, scm_buf_len);
+
+ ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
+ resp_len, scm_buf, len);
+ return ret;
+
+}
+
+struct scm_extra_arg {
+ union {
+ u32 args32[N_EXT_SCM_ARGS];
+ u64 args64[N_EXT_SCM_ARGS];
+ };
+};
+
+static enum scm_interface_version {
+ SCM_UNKNOWN,
+ SCM_LEGACY,
+ SCM_ARMV8_32,
+ SCM_ARMV8_64,
+} scm_version = SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 scm_version_mask;
+
+bool is_scm_armv8(void)
+{
+ int ret;
+ u64 ret1, x0;
+
+ struct scm_desc desc = {0};
+
+ if (likely(scm_version != SCM_UNKNOWN))
+ return (scm_version == SCM_ARMV8_32) ||
+ (scm_version == SCM_ARMV8_64);
+ /*
+ * This is a one time check that runs on the first ever
+ * invocation of is_scm_armv8. We might be called in atomic
+ * context so no mutexes etc. Also, we can't use the scm_call2
+ * or scm_call2_APIs directly since they depend on this init.
+ */
+
+ /* First try a SMC64 call */
+ scm_version = SCM_ARMV8_64;
+ ret1 = 0;
+ x0 = SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
+
+ desc.arginfo = SCM_ARGS(1);
+ desc.args[0] = x0;
+
+ ret = scm_call_qcpe(x0 | SMC64_MASK, &desc);
+
+ ret1 = desc.arginfo;
+
+ if (ret || !ret1) {
+ /* Try SMC32 call */
+ ret1 = 0;
+
+ desc.arginfo = SCM_ARGS(1);
+ desc.args[0] = x0;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret || !ret1)
+ scm_version = SCM_LEGACY;
+ else
+ scm_version = SCM_ARMV8_32;
+ } else
+ scm_version_mask = SMC64_MASK;
+
+ pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version,
+ scm_version_mask);
+
+ return (scm_version == SCM_ARMV8_32) ||
+ (scm_version == SCM_ARMV8_64);
+}
+EXPORT_SYMBOL(is_scm_armv8);
+
+/*
+ * If there are more than N_REGISTER_ARGS, allocate a buffer and place
+ * the additional arguments in it. The extra argument buffer will be
+ * pointed to by X5.
+ */
+static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
+{
+ int i, j;
+ struct scm_extra_arg *argbuf;
+ int arglen = desc->arginfo & 0xf;
+ size_t argbuflen = PAGE_ALIGN(sizeof(struct scm_extra_arg));
+
+ desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
+
+ if (likely(arglen <= N_REGISTER_ARGS)) {
+ desc->extra_arg_buf = NULL;
+ return 0;
+ }
+
+ argbuf = kzalloc(argbuflen, flags);
+ if (!argbuf)
+ return -ENOMEM;
+
+ desc->extra_arg_buf = argbuf;
+
+ j = FIRST_EXT_ARG_IDX;
+ if (scm_version == SCM_ARMV8_64)
+ for (i = 0; i < N_EXT_SCM_ARGS; i++)
+ argbuf->args64[i] = desc->args[j++];
+ else
+ for (i = 0; i < N_EXT_SCM_ARGS; i++)
+ argbuf->args32[i] = desc->args[j++];
+ desc->x5 = virt_to_phys(argbuf);
+ __cpuc_flush_dcache_area(argbuf, argbuflen);
+ outer_flush_range(virt_to_phys(argbuf),
+ virt_to_phys(argbuf) + argbuflen);
+
+ return 0;
+}
+
+/**
+ * scm_call2() - Invoke a syscall in the secure world
+ * @fn_id: The function ID for this syscall
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_call and invalidated in the cache
+ * immediately after scm_call returns. An important point that must be noted
+ * is that on ARMV8 architectures, invalidation actually also causes a dirty
+ * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
+ * paramount importance that the buffer be flushed before invoking scm_call2,
+ * even if you don't care about the contents of that buffer.
+ *
+ * Note that cache maintenance on the argument buffer (desc->args) is taken care
+ * of by scm_call2; however, callers are responsible for any other cached
+ * buffers passed over to the secure world.
+*/
+int scm_call2(u32 fn_id, struct scm_desc *desc)
+{
+ int arglen = desc->arginfo & 0xf;
+ int ret;
+ u64 x0;
+
+ if (unlikely(!is_scm_armv8()))
+ return -ENODEV;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_NOIO);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | scm_version_mask;
+
+ mutex_lock(&scm_lock);
+
+ if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+ mutex_lock(&scm_lmh_lock);
+
+ desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
+
+ trace_scm_call_start(x0, desc);
+
+ ret = scm_call_qcpe(x0, desc);
+
+ trace_scm_call_end(desc);
+
+ if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+ mutex_unlock(&scm_lmh_lock);
+
+ mutex_unlock(&scm_lock);
+
+ if (ret < 0)
+ pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return scm_remap_error(ret);
+ return 0;
+}
+EXPORT_SYMBOL(scm_call2);
+
+/**
+ * scm_call2_atomic() - Invoke a syscall in the secure world
+ *
+ * Similar to scm_call2 except that this can be invoked in atomic context.
+ * There is also no retry mechanism implemented. Please ensure that the
+ * secure world syscall can be executed in such a context and can complete
+ * in a timely manner.
+ */
+int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
+{
+ int arglen = desc->arginfo & 0xf;
+ int ret;
+ u64 x0;
+
+ if (unlikely(!is_scm_armv8()))
+ return -ENODEV;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
+
+ pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
+
+ ret = scm_call_qcpe(x0, desc);
+
+ if (ret < 0)
+ pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return scm_remap_error(ret);
+ return ret;
+}
+
+/**
+ * scm_call() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_call and invalidated in the cache
+ * immediately after scm_call returns. Cache maintenance on the command and
+ * response buffers is taken care of by scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+ void *resp_buf, size_t resp_len)
+{
+ struct scm_command *cmd;
+ int ret;
+ size_t len = SCM_BUF_LEN(cmd_len, resp_len);
+
+ if (len == 0 || PAGE_ALIGN(len) < len)
+ return -EINVAL;
+
+ cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
+ resp_len, cmd, len);
+ if (unlikely(ret == SCM_EBUSY))
+ ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
+ resp_buf, resp_len, cmd, PAGE_ALIGN(len));
+ kfree(cmd);
+ return ret;
+}
+EXPORT_SYMBOL(scm_call);
+
+#define SCM_CLASS_REGISTER (0x2 << 8)
+#define SCM_MASK_IRQS BIT(5)
+#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
+ SCM_CLASS_REGISTER | \
+ SCM_MASK_IRQS | \
+ (n & 0xf))
+
+/**
+ * scm_call_atomic1() - Send an atomic SCM command with one argument
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+ int context_id;
+ int ret;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = arg1;
+
+ x0 = r0;
+ desc.arginfo = r1;
+ desc.args[0] = r2;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic1);
+
+/**
+ * scm_call_atomic1_1() - SCM command with one argument and one return value
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @ret1: first return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
+{
+ int context_id;
+ int ret;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = arg1;
+
+ x0 = r0;
+ desc.arginfo = r1;
+ desc.args[0] = r2;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ *ret1 = desc.arginfo;
+
+ return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic1_1);
+
+/**
+ * scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+ int context_id;
+ int ret;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+
+ x0 = r0;
+ desc.arginfo = r1;
+ desc.args[0] = r2;
+ desc.args[1] = r3;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic2);
+
+/**
+ * scm_call_atomic3() - Send an atomic SCM command with three arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
+{
+ int context_id;
+ int ret;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3);
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+ register u32 r4 asm("r4") = arg3;
+
+ x0 = r0;
+ desc.arginfo = r1;
+ desc.args[0] = r2;
+ desc.args[1] = r3;
+ desc.args[2] = r4;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic3);
+
+s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
+{
+ int ret;
+ int context_id;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 4);
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+ register u32 r4 asm("r4") = arg3;
+ register u32 r5 asm("r5") = arg4;
+
+ x0 = r0;
+ desc.arginfo = r1;
+ desc.args[0] = r2;
+ desc.args[1] = r3;
+ desc.args[2] = r4;
+ desc.args[3] = r5;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ *ret1 = desc.arginfo;
+ *ret2 = desc.args[0];
+
+ return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic4_3);
+
+/**
+ * scm_call_atomic5_3() - SCM command with five arguments and three return values
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ * @arg4: fourth argument
+ * @arg5: fifth argument
+ * @ret1: first return value
+ * @ret2: second return value
+ * @ret3: third return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+{
+ int ret;
+ int context_id;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 5);
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+ register u32 r4 asm("r4") = arg3;
+ register u32 r5 asm("r5") = arg4;
+ register u32 r6 asm("r6") = arg5;
+
+ x0 = r0;
+ desc.arginfo = r1;
+ desc.args[0] = r2;
+ desc.args[1] = r3;
+ desc.args[2] = r4;
+ desc.args[3] = r5;
+ desc.args[4] = r6;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ *ret1 = desc.arginfo;
+ *ret2 = desc.args[0];
+ *ret3 = desc.args[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(scm_call_atomic5_3);
+
+u32 scm_get_version(void)
+{
+ int context_id;
+ static u32 version = -1;
+ int ret;
+ uint64_t x0;
+ struct scm_desc desc = {0};
+
+ register u32 r0 asm("r0");
+ register u32 r1 asm("r1");
+
+ if (version != -1)
+ return version;
+
+ mutex_lock(&scm_lock);
+
+ r0 = 0x1 << 8;
+ r1 = (uintptr_t)&context_id;
+
+ x0 = r0;
+ desc.arginfo = r1;
+
+ ret = scm_call_qcpe(x0, &desc);
+
+ version = desc.arginfo;
+
+ mutex_unlock(&scm_lock);
+
+ if (ret < 0)
+ return scm_remap_error(ret);
+
+ return version;
+}
+EXPORT_SYMBOL(scm_get_version);
+
+#define SCM_IO_READ 0x1
+#define SCM_IO_WRITE 0x2
+
+u32 scm_io_read(phys_addr_t address)
+{
+ struct scm_desc desc = {
+ .args[0] = address,
+ .arginfo = SCM_ARGS(1),
+ };
+
+ if (!is_scm_armv8())
+ return scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, address);
+
+ scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc);
+ return desc.ret[0];
+}
+EXPORT_SYMBOL(scm_io_read);
+
+int scm_io_write(phys_addr_t address, u32 val)
+{
+ int ret;
+
+ if (!is_scm_armv8())
+ ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
+ else {
+ struct scm_desc desc = {
+ .args[0] = address,
+ .args[1] = val,
+ .arginfo = SCM_ARGS(2),
+ };
+ ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
+ &desc);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(scm_io_write);
+
+int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ int ret;
+ struct scm_desc desc = {0};
+
+ if (!is_scm_armv8()) {
+ u32 ret_val = 0;
+ u32 svc_cmd = (svc_id << 10) | cmd_id;
+
+ ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
+ sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+ if (ret)
+ return ret;
+
+ return ret_val;
+ }
+ desc.arginfo = SCM_ARGS(1);
+ desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
+ if (ret)
+ return ret;
+
+ return desc.ret[0];
+}
+EXPORT_SYMBOL(scm_is_call_available);
+
+#define GET_FEAT_VERSION_CMD 3
+int scm_get_feat_version(u32 feat)
+{
+ struct scm_desc desc = {0};
+ int ret;
+
+ if (!is_scm_armv8()) {
+ if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
+ u32 version;
+
+ if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
+ sizeof(feat), &version, sizeof(version)))
+ return version;
+ }
+ return 0;
+ }
+
+ ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
+ if (ret <= 0)
+ return 0;
+
+ desc.args[0] = feat;
+ desc.arginfo = SCM_ARGS(1);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
+ &desc);
+ if (!ret)
+ return desc.ret[0];
+
+ return 0;
+}
+EXPORT_SYMBOL(scm_get_feat_version);
+
+#define RESTORE_SEC_CFG 2
+int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+{
+ struct scm_desc desc = {0};
+ int ret;
+ struct restore_sec_cfg {
+ u32 device_id;
+ u32 spare;
+ } cfg;
+
+ cfg.device_id = device_id;
+ cfg.spare = spare;
+
+ if (IS_ERR_OR_NULL(scm_ret))
+ return -EINVAL;
+
+ if (!is_scm_armv8())
+ return scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &cfg, sizeof(cfg),
+ scm_ret, sizeof(*scm_ret));
+
+ desc.args[0] = device_id;
+ desc.args[1] = spare;
+ desc.arginfo = SCM_ARGS(2);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, RESTORE_SEC_CFG), &desc);
+ if (ret)
+ return ret;
+
+ *scm_ret = desc.ret[0];
+ return 0;
+}
+EXPORT_SYMBOL(scm_restore_sec_cfg);
+
+/*
+ * SCM call command ID to check secure mode
+ * Return zero for secure device.
+ * Return one for a non-secure device or a secure
+ * device with debug enabled.
+ */
+#define TZ_INFO_GET_SECURE_STATE 0x4
+bool scm_is_secure_device(void)
+{
+ struct scm_desc desc = {0};
+ int ret = 0, resp;
+
+ desc.args[0] = 0;
+ desc.arginfo = 0;
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
+ 0, &resp, sizeof(resp));
+ } else {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
+ TZ_INFO_GET_SECURE_STATE),
+ &desc);
+ resp = desc.ret[0];
+ }
+
+ if (ret) {
+ pr_err("%s: SCM call failed\n", __func__);
+ return false;
+ }
+
+ if ((resp & BIT(0)) || (resp & BIT(2)))
+ return true;
+ else
+ return false;
+}
+EXPORT_SYMBOL(scm_is_secure_device);
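
For callers, this para-virtualized backend keeps the existing scm_call2()/scm_desc contract; only the transport underneath changes to HAB messages towards QCPE. A minimal usage sketch built from identifiers that already appear in this file (SCM_ARGS, SCM_SIP_FNID, SCM_SVC_MP, RESTORE_SEC_CFG); the argument value is made up:

#include <soc/qcom/scm.h>

#define RESTORE_SEC_CFG 2	/* same value as the local define above */

static int example_restore_sec_cfg(u32 device_id)
{
	struct scm_desc desc = {0};
	int ret;

	desc.args[0] = device_id;	/* hypothetical argument */
	desc.args[1] = 0;
	desc.arginfo = SCM_ARGS(2);	/* two register arguments */

	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, RESTORE_SEC_CFG), &desc);
	if (!ret)
		pr_info("secure world returned %#llx\n", desc.ret[0]);

	return ret;
}
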
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 221ae0c1fefb..f4c67f1cd9d9 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -84,6 +84,7 @@ static DEFINE_MUTEX(service_list_lock);
struct ind_req_resp {
char service_path[SERVREG_NOTIF_NAME_LENGTH];
int transaction_id;
+ int curr_state;
};
/*
@@ -200,8 +201,30 @@ static void send_ind_ack(struct work_struct *work)
struct qmi_servreg_notif_set_ack_req_msg_v01 req;
struct msg_desc req_desc, resp_desc;
struct qmi_servreg_notif_set_ack_resp_msg_v01 resp = { { 0, 0 } };
+ struct service_notif_info *service_notif;
+ enum pd_subsys_state state = USER_PD_STATE_CHANGE;
int rc;
+ service_notif = _find_service_info(data->ind_msg.service_path);
+ if (!service_notif)
+ return;
+ if ((int)data->ind_msg.curr_state < QMI_STATE_MIN_VAL ||
+ (int)data->ind_msg.curr_state > QMI_STATE_MAX_VAL)
+ pr_err("Unexpected indication notification state %d\n",
+ data->ind_msg.curr_state);
+ else {
+ mutex_lock(&notif_add_lock);
+ mutex_lock(&service_list_lock);
+ rc = service_notif_queue_notification(service_notif,
+ data->ind_msg.curr_state, &state);
+ if (rc & NOTIFY_STOP_MASK)
+ pr_err("Notifier callback aborted for %s with error %d\n",
+ data->ind_msg.service_path, rc);
+ service_notif->curr_state = data->ind_msg.curr_state;
+ mutex_unlock(&service_list_lock);
+ mutex_unlock(&notif_add_lock);
+ }
+
req.transaction_id = data->ind_msg.transaction_id;
snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
data->ind_msg.service_path);
@@ -236,11 +259,9 @@ static void root_service_service_ind_cb(struct qmi_handle *handle,
unsigned int msg_len, void *ind_cb_priv)
{
struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv;
- struct service_notif_info *service_notif;
struct msg_desc ind_desc;
struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg = {
QMI_STATE_MIN_VAL, "", 0xFFFF };
- enum pd_subsys_state state = USER_PD_STATE_CHANGE;
int rc;
ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG;
@@ -256,27 +277,8 @@ static void root_service_service_ind_cb(struct qmi_handle *handle,
ind_msg.service_name, ind_msg.curr_state,
ind_msg.transaction_id);
- service_notif = _find_service_info(ind_msg.service_name);
- if (!service_notif)
- return;
-
- if ((int)ind_msg.curr_state < QMI_STATE_MIN_VAL ||
- (int)ind_msg.curr_state > QMI_STATE_MAX_VAL)
- pr_err("Unexpected indication notification state %d\n",
- ind_msg.curr_state);
- else {
- mutex_lock(&notif_add_lock);
- mutex_lock(&service_list_lock);
- rc = service_notif_queue_notification(service_notif,
- ind_msg.curr_state, &state);
- if (rc & NOTIFY_STOP_MASK)
- pr_err("Notifier callback aborted for %s with error %d\n",
- ind_msg.service_name, rc);
- service_notif->curr_state = ind_msg.curr_state;
- mutex_unlock(&service_list_lock);
- mutex_unlock(&notif_add_lock);
- }
data->ind_msg.transaction_id = ind_msg.transaction_id;
+ data->ind_msg.curr_state = ind_msg.curr_state;
snprintf(data->ind_msg.service_path,
ARRAY_SIZE(data->ind_msg.service_path), "%s",
ind_msg.service_name);
@@ -373,6 +375,12 @@ static void root_service_service_arrive(struct work_struct *work)
mutex_unlock(&qmi_client_release_lock);
pr_info("Connection established between QMI handle and %d service\n",
data->instance_id);
+ /* Register for indication messages about service */
+ rc = qmi_register_ind_cb(data->clnt_handle,
+ root_service_service_ind_cb, (void *)data);
+ if (rc < 0)
+ pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+ data->instance_id, rc);
mutex_lock(&notif_add_lock);
mutex_lock(&service_list_lock);
list_for_each_entry(service_notif, &service_list, list) {
@@ -395,12 +403,6 @@ static void root_service_service_arrive(struct work_struct *work)
}
mutex_unlock(&service_list_lock);
mutex_unlock(&notif_add_lock);
- /* Register for indication messages about service */
- rc = qmi_register_ind_cb(data->clnt_handle,
- root_service_service_ind_cb, (void *)data);
- if (rc < 0)
- pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
- data->instance_id, rc);
}
static void root_service_service_exit(struct qmi_client_info *data,
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 7f03aabf415e..24de162e5401 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -196,6 +196,7 @@ struct spcom_channel {
* glink state: CONNECTED / LOCAL_DISCONNECTED, REMOTE_DISCONNECTED
*/
unsigned glink_state;
+ bool is_closing;
/* Events notification */
struct completion connect;
@@ -245,7 +246,7 @@ struct spcom_device {
int channel_count;
/* private */
- struct mutex lock;
+ struct mutex cmd_lock;
/* Link state */
struct completion link_state_changed;
@@ -483,7 +484,17 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
switch (event) {
case GLINK_CONNECTED:
pr_debug("GLINK_CONNECTED, ch name [%s].\n", ch->name);
+ mutex_lock(&ch->lock);
+
+ if (ch->is_closing) {
+ pr_err("Unexpected CONNECTED while closing [%s].\n",
+ ch->name);
+ mutex_unlock(&ch->lock);
+ return;
+ }
+
ch->glink_state = event;
+
/*
* if spcom_notify_state() is called within glink_open()
* then ch->glink_handle is not updated yet.
@@ -493,7 +504,16 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
ch->glink_handle = handle;
}
- /* prepare default rx buffer after connected */
+ /* signal completion before unlocking the mutex and calling glink */
+ complete_all(&ch->connect);
+
+ /*
+ * Prepare default rx buffer.
+ * glink_queue_rx_intent() may only be called after the channel is
+ * connected, so do it here, as early as possible, to allow rx data.
+ */
+
+ pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
ret = glink_queue_rx_intent(ch->glink_handle,
ch, ch->rx_buf_size);
if (ret) {
@@ -503,7 +523,9 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
ch->rx_buf_size);
ch->rx_buf_ready = true;
}
- complete_all(&ch->connect);
+
+ pr_debug("GLINK_CONNECTED, ch name [%s] done.\n", ch->name);
+ mutex_unlock(&ch->lock);
break;
case GLINK_LOCAL_DISCONNECTED:
/*
@@ -668,6 +690,13 @@ static int spcom_init_channel(struct spcom_channel *ch, const char *name)
ch->glink_state = GLINK_LOCAL_DISCONNECTED;
ch->actual_rx_size = 0;
ch->rx_buf_size = SPCOM_RX_BUF_SIZE;
+ ch->is_closing = false;
+ ch->glink_handle = NULL;
+ ch->ref_count = 0;
+ ch->rx_abort = false;
+ ch->tx_abort = false;
+ ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
+ ch->pid = 0;
return 0;
}
@@ -738,6 +767,8 @@ static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
/* init completion before calling glink_open() */
reinit_completion(&ch->connect);
+ ch->is_closing = false;
+
handle = glink_open(&cfg);
if (IS_ERR_OR_NULL(handle)) {
pr_err("glink_open failed.\n");
@@ -752,6 +783,8 @@ static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
ch->pid = current_pid();
ch->txn_id = INITIAL_TXN_ID;
+ mutex_unlock(&ch->lock);
+
pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
name, timeout_msec);
@@ -768,8 +801,6 @@ static int spcom_open(struct spcom_channel *ch, unsigned int timeout_msec)
pr_debug("Channel [%s] opened, no timeout.\n", name);
}
- mutex_unlock(&ch->lock);
-
return 0;
exit_err:
mutex_unlock(&ch->lock);
@@ -796,6 +827,8 @@ static int spcom_close(struct spcom_channel *ch)
return 0;
}
+ ch->is_closing = true;
+
ret = glink_close(ch->glink_handle);
if (ret)
pr_err("glink_close() fail, ret [%d].\n", ret);
@@ -811,6 +844,7 @@ static int spcom_close(struct spcom_channel *ch)
ch->pid = 0;
pr_debug("Channel closed [%s].\n", ch->name);
+
mutex_unlock(&ch->lock);
return 0;
@@ -1952,6 +1986,8 @@ static int spcom_handle_write(struct spcom_channel *ch,
swap_id = htonl(cmd->cmd_id);
memcpy(cmd_name, &swap_id, sizeof(int));
+ mutex_lock(&spcom_dev->cmd_lock);
+
pr_debug("cmd_id [0x%x] cmd_name [%s].\n", cmd_id, cmd_name);
switch (cmd_id) {
@@ -1972,9 +2008,11 @@ static int spcom_handle_write(struct spcom_channel *ch,
break;
default:
pr_err("Invalid Command Id [0x%x].\n", (int) cmd->cmd_id);
- return -EINVAL;
+ ret = -EINVAL;
}
+ mutex_unlock(&spcom_dev->cmd_lock);
+
return ret;
}
@@ -2675,7 +2713,7 @@ static int spcom_probe(struct platform_device *pdev)
return -ENOMEM;
spcom_dev = dev;
- mutex_init(&dev->lock);
+ mutex_init(&spcom_dev->cmd_lock);
init_completion(&dev->link_state_changed);
spcom_dev->link_state = GLINK_LINK_STATE_DOWN;
@@ -2748,7 +2786,7 @@ static int __init spcom_init(void)
{
int ret;
- pr_info("spcom driver Ver 1.0 23-Nov-2015.\n");
+ pr_info("spcom driver version 1.1 17-July-2017.\n");
ret = platform_driver_register(&spcom_driver);
if (ret)
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 745a069df88a..625030f1f256 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -134,6 +134,8 @@ static int msm_watchdog_suspend(struct device *dev)
return 0;
__raw_writel(1, wdog_dd->base + WDT0_RST);
if (wdog_dd->wakeup_irq_enable) {
+ /* Make sure register write is complete before proceeding */
+ mb();
wdog_dd->last_pet = sched_clock();
return 0;
}
@@ -148,8 +150,15 @@ static int msm_watchdog_resume(struct device *dev)
{
struct msm_watchdog_data *wdog_dd =
(struct msm_watchdog_data *)dev_get_drvdata(dev);
- if (!enable || wdog_dd->wakeup_irq_enable)
+ if (!enable)
+ return 0;
+ if (wdog_dd->wakeup_irq_enable) {
+ __raw_writel(1, wdog_dd->base + WDT0_RST);
+ /* Make sure register write is complete before proceeding */
+ mb();
+ wdog_dd->last_pet = sched_clock();
return 0;
+ }
__raw_writel(1, wdog_dd->base + WDT0_EN);
__raw_writel(1, wdog_dd->base + WDT0_RST);
mb();
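The same write-then-barrier rule can be expressed in isolation; a rough sketch, reusing the driver's WDT0_RST offset and assuming an already ioremap'd register base:

static void pet_watchdog(void __iomem *base)
{
	__raw_writel(1, base + WDT0_RST);
	/* Ensure the pet reaches the watchdog before any further work. */
	mb();
}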
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 7d3af3eacf57..1ddba9ae8c0f 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (!t->rx_dma) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
@@ -665,7 +665,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_TO_DEVICE);
- if (!t->tx_dma) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
ret = -EFAULT;
goto err_tx_map;
}
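The fix above follows the standard DMA API rule: a handle returned by dma_map_single() is only valid if dma_mapping_error() says so. A generic sketch of that pattern, with invented names:

#include <linux/dma-mapping.h>

static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -EFAULT;	/* mapping failed; *dma must not be used */

	return 0;
}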
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index aaa96c3df45b..5fbd3766b981 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -87,7 +87,7 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
int status = 1;
struct sync_timeline *parent = sync_pt_parent(pt);
- if (fence_is_signaled_locked(&pt->base))
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &pt->base.flags))
status = pt->base.status;
seq_printf(s, " %s%spt %s",
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 200d3de8bc1e..a180c000e246 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1112,6 +1112,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
*/
if (dump_payload)
goto after_immediate_data;
+ /*
+ * Check for underflow case where both EDTL and immediate data payload
+ * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+ * already been set in target_cmd_size_check() as se_cmd->data_length.
+ *
+ * For this special case, fail the command and dump the immediate data
+ * payload.
+ */
+ if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+ cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+ goto after_immediate_data;
+ }
immed_ret = iscsit_handle_immediate_data(cmd, hdr,
cmd->first_burst_len);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 253a91bff943..272e6f755322 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -132,7 +132,7 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-void transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *, int);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 46b1991fbb50..c9be953496ec 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
unsigned long flags;
bool remove = true, send_tas;
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
transport_send_task_abort(cmd);
}
- transport_cmd_finish_abort(cmd, remove);
+ return transport_cmd_finish_abort(cmd, remove);
}
static int target_check_cdb_and_preempt(struct list_head *list,
@@ -185,8 +185,8 @@ void core_tmr_abort_task(
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- transport_cmd_finish_abort(se_cmd, true);
- target_put_sess_cmd(se_cmd);
+ if (!transport_cmd_finish_abort(se_cmd, true))
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -286,8 +286,8 @@ static void core_tmr_drain_tmr_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- transport_cmd_finish_abort(cmd, 1);
- target_put_sess_cmd(cmd);
+ if (!transport_cmd_finish_abort(cmd, 1))
+ target_put_sess_cmd(cmd);
}
}
@@ -385,8 +385,8 @@ static void core_tmr_drain_state_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- core_tmr_handle_tas_abort(cmd, tas);
- target_put_sess_cmd(cmd);
+ if (!core_tmr_handle_tas_abort(cmd, tas))
+ target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 60743bf27f37..37c77db6e737 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -639,9 +639,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+ int ret = 0;
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
@@ -653,9 +654,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
- return;
+ return 1;
if (remove && ack_kref)
- transport_put_cmd(cmd);
+ ret = transport_put_cmd(cmd);
+
+ return ret;
}
static void target_complete_failure_work(struct work_struct *work)
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index b1f4c3f27111..4b586f62cdc7 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -2858,7 +2858,7 @@ static void msm_thermal_bite(int zone_id, int temp)
tsens_id, temp);
}
/* If it is a secure device ignore triggering the thermal bite. */
- if (scm_is_secure_device())
+ if (!scm_is_secure_device())
return;
if (!is_scm_armv8()) {
scm_call_atomic1(SCM_SVC_BOOT, THERM_SECURE_BITE_CMD, 0);
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 830ef92ffe80..fc9faaee3170 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -272,7 +272,7 @@ static struct of_device_id msm_hs_match_table[] = {
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
#define RETRY_TIMEOUT 5
-#define UARTDM_NR 256
+#define UARTDM_NR 4
#define BAM_PIPE_MIN 0
#define BAM_PIPE_MAX 11
#define BUS_SCALING 1
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1b4fb562ce4b..927c84ea4921 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -39,6 +39,7 @@
#define DWC3_MSG_MAX 500
/* Global constants */
+#define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */
#define DWC3_EP0_BOUNCE_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
#define DWC3_XHCI_RESOURCES_NUM 2
@@ -748,6 +749,7 @@ struct dwc3_scratchpad_array {
* @ctrl_req: usb control request which is used for ep0
* @ep0_trb: trb which is used for the ctrl_req
* @ep0_bounce: bounce buffer for ep0
+ * @zlp_buf: used when request->zero is set
* @setup_buf: used while precessing STD USB requests
* @ctrl_req_addr: dma address of ctrl_req
* @ep0_trb: dma address of ep0_trb
@@ -853,6 +855,7 @@ struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
struct dwc3_trb *ep0_trb;
void *ep0_bounce;
+ void *zlp_buf;
void *scratchbuf;
u8 *setup_buf;
dma_addr_t ctrl_req_addr;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index c2d788bc4bc5..c3077ac11709 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -54,6 +54,8 @@
#include "debug.h"
#include "xhci.h"
+#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
+
/* time out to wait for USB cable status notification (in ms)*/
#define SM_INIT_TIMEOUT 30000
@@ -70,6 +72,8 @@ MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
/* XHCI registers */
#define USB3_HCSPARAMS1 (0x4)
+#define USB3_HCCPARAMS2 (0x1c)
+#define HCC_CTC(p) ((p) & (1 << 3))
#define USB3_PORTSC (0x420)
/**
@@ -211,6 +215,7 @@ struct dwc3_msm {
struct notifier_block dwc3_cpu_notifier;
struct notifier_block usbdev_nb;
bool hc_died;
+ bool xhci_ss_compliance_enable;
struct extcon_dev *extcon_vbus;
struct extcon_dev *extcon_id;
@@ -227,6 +232,7 @@ struct dwc3_msm {
int pm_qos_latency;
struct pm_qos_request pm_qos_req_dma;
struct delayed_work perf_vote_work;
+ struct delayed_work sdp_check;
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -2625,6 +2631,42 @@ done:
return NOTIFY_DONE;
}
+
+static void check_for_sdp_connection(struct work_struct *w)
+{
+ int ret;
+ union power_supply_propval pval = {0};
+ struct dwc3_msm *mdwc =
+ container_of(w, struct dwc3_msm, sdp_check.work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ if (!mdwc->vbus_active)
+ return;
+
+ /* floating D+/D- lines detected */
+ if (dwc->gadget.state < USB_STATE_DEFAULT &&
+ dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
+ if (!mdwc->usb_psy) {
+ mdwc->usb_psy = power_supply_get_by_name("usb");
+ if (!mdwc->usb_psy) {
+ dev_dbg(mdwc->dev,
+ "Could not get usb power_supply\n");
+ return;
+ }
+ }
+ pval.intval = -ETIMEDOUT;
+ ret = power_supply_set_property(mdwc->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ if (ret)
+ dev_dbg(mdwc->dev,
+ "power supply error when setting property\n");
+
+ mdwc->vbus_active = 0;
+ dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ }
+}
+
static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2796,6 +2838,34 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(speed);
static void msm_dwc3_perf_vote_work(struct work_struct *w);
+static ssize_t xhci_link_compliance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ if (mdwc->xhci_ss_compliance_enable)
+ return snprintf(buf, PAGE_SIZE, "y\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "n\n");
+}
+
+static ssize_t xhci_link_compliance_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ bool value;
+ int ret;
+
+ ret = strtobool(buf, &value);
+ if (!ret) {
+ mdwc->xhci_ss_compliance_enable = value;
+ return count;
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(xhci_link_compliance);
static int dwc3_msm_probe(struct platform_device *pdev)
{
@@ -2833,6 +2903,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
+ INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
if (!mdwc->dwc3_wq) {
@@ -3139,6 +3210,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
device_create_file(&pdev->dev, &dev_attr_mode);
device_create_file(&pdev->dev, &dev_attr_speed);
+ device_create_file(&pdev->dev, &dev_attr_xhci_link_compliance);
host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
if (host_mode ||
@@ -3170,6 +3242,7 @@ static int dwc3_msm_remove(struct platform_device *pdev)
int ret_pm;
device_remove_file(&pdev->dev, &dev_attr_mode);
+ device_remove_file(&pdev->dev, &dev_attr_xhci_link_compliance);
if (cpu_to_affin)
unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
@@ -3434,6 +3507,25 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
}
/*
+ * If the Compliance Transition Capability(CTC) flag of
+ * HCCPARAMS2 register is set and xhci_link_compliance sysfs
+ * param has been enabled by the user for the SuperSpeed host
+ * controller, then write 10 (Link in Compliance Mode State)
+ * onto the Port Link State(PLS) field of the PORTSC register
+ * for 3.0 host controller which is at an offset of USB3_PORTSC
+ * + 0x10 from the DWC3 base address. Also, disable the runtime
+ * PM of 3.0 root hub (root hub of shared_hcd of xhci device)
+ */
+ if (HCC_CTC(dwc3_msm_read_reg(mdwc->base, USB3_HCCPARAMS2))
+ && mdwc->xhci_ss_compliance_enable
+ && dwc->maximum_speed == USB_SPEED_SUPER) {
+ dwc3_msm_write_reg(mdwc->base, USB3_PORTSC + 0x10,
+ 0x10340);
+ pm_runtime_disable(&hcd_to_xhci(platform_get_drvdata(
+ dwc->xhci))->shared_hcd->self.root_hub->dev);
+ }
+
+ /*
* In some cases it is observed that USB PHY is not going into
* suspend with host mode suspend functionality. Hence disable
* XHCI's runtime PM here if disable_host_mode_pm is set.
@@ -3586,28 +3678,38 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
return 0;
}
-static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
+int get_psy_type(struct dwc3_msm *mdwc)
{
union power_supply_propval pval = {0};
- int ret;
if (mdwc->charging_disabled)
- return 0;
-
- if (mdwc->max_power == mA)
- return 0;
+ return -EINVAL;
if (!mdwc->usb_psy) {
mdwc->usb_psy = power_supply_get_by_name("usb");
if (!mdwc->usb_psy) {
- dev_warn(mdwc->dev, "Could not get usb power_supply\n");
+ dev_err(mdwc->dev, "Could not get usb psy\n");
return -ENODEV;
}
}
- power_supply_get_property(mdwc->usb_psy,
- POWER_SUPPLY_PROP_REAL_TYPE, &pval);
- if (pval.intval != POWER_SUPPLY_TYPE_USB)
+ power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
+ &pval);
+
+ return pval.intval;
+}
+
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
+{
+ union power_supply_propval pval = {0};
+ int ret, psy_type;
+
+ if (mdwc->max_power == mA)
+ return 0;
+
+ psy_type = get_psy_type(mdwc);
+ if (psy_type != POWER_SUPPLY_TYPE_USB &&
+ psy_type != POWER_SUPPLY_TYPE_USB_FLOAT)
return 0;
dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
@@ -3684,6 +3786,10 @@ static void dwc3_otg_sm_work(struct work_struct *w)
work = 1;
} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "b_sess_vld\n");
+ if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
+ queue_delayed_work(mdwc->dwc3_wq,
+ &mdwc->sdp_check,
+ msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
/*
* Increment pm usage count upon cable connect. Count
* is decremented in OTG_STATE_B_PERIPHERAL state on
@@ -3707,6 +3813,7 @@ static void dwc3_otg_sm_work(struct work_struct *w)
!test_bit(ID, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "!id || !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
+ cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
/*
* Decrement pm usage count upon cable disconnect
@@ -3739,6 +3846,7 @@ static void dwc3_otg_sm_work(struct work_struct *w)
if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
+ cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP !susp\n");
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 658fcca485d8..94709587f238 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1340,6 +1340,32 @@ static bool dwc3_gadget_is_suspended(struct dwc3 *dwc)
return false;
}
+static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ dwc3_gadget_ep_free_request(ep, request);
+}
+
+static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+ struct usb_request *request;
+ struct usb_ep *ep = &dep->endpoint;
+
+ dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n");
+ request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
+ if (!request)
+ return -ENOMEM;
+
+ request->length = 0;
+ request->buf = dwc->zlp_buf;
+ request->complete = __dwc3_gadget_ep_zlp_complete;
+
+ req = to_dwc3_request(request);
+
+ return __dwc3_gadget_ep_queue(dep, req);
+}
+
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags)
{
@@ -1387,6 +1413,16 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
ret = __dwc3_gadget_ep_queue(dep, req);
+ /*
+ * Okay, here's the thing, if gadget driver has requested for a ZLP by
+ * setting request->zero, instead of doing magic, we will just queue an
+ * extra usb_request ourselves so that it gets handled the same way as
+ * any other request.
+ */
+ if (ret == 0 && request->zero && request->length &&
+ (request->length % ep->maxpacket == 0))
+ ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
+
out:
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -3554,6 +3590,12 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err3;
}
+ dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
+ if (!dwc->zlp_buf) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+
dwc->gadget.ops = &dwc3_gadget_ops;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.sg_supported = true;
@@ -3595,12 +3637,12 @@ int dwc3_gadget_init(struct dwc3 *dwc)
ret = dwc3_gadget_init_endpoints(dwc);
if (ret)
- goto err4;
+ goto err5;
ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
if (ret) {
dev_err(dwc->dev, "failed to register udc\n");
- goto err4;
+ goto err5;
}
if (!dwc->is_drd) {
@@ -3612,6 +3654,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
return 0;
+err5:
+ kfree(dwc->zlp_buf);
+
err4:
dwc3_gadget_free_endpoints(dwc);
dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
@@ -3649,6 +3694,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dwc->ep0_bounce, dwc->ep0_bounce_addr);
kfree(dwc->setup_buf);
+ kfree(dwc->zlp_buf);
dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
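For reference, a hedged sketch of how a gadget function driver asks for the trailing ZLP that the dwc3 change above now services internally; the endpoint, request and buffer are assumed to already exist:

static int queue_in_with_zlp(struct usb_ep *ep, struct usb_request *req,
			     void *buf, unsigned int len)
{
	req->buf = buf;
	req->length = len;
	/* Ask the UDC for a trailing ZLP when len is a maxpacket multiple. */
	req->zero = 1;

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}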
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index eeccae8bfc1b..442d44278f33 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -35,7 +35,8 @@
(speed == USB_SPEED_SUPER ?\
SSUSB_GADGET_VBUS_DRAW : CONFIG_USB_GADGET_VBUS_DRAW)
-static bool disable_l1_for_hs;
+/* disable LPM by default */
+static bool disable_l1_for_hs = true;
module_param(disable_l1_for_hs, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_l1_for_hs,
"Disable support for L1 LPM for HS devices");
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 1ef3442cf618..7950f25136a5 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -346,6 +346,7 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
struct acc_dev *dev = ep->driver_data;
char *string_dest = NULL;
int length = req->actual;
+ unsigned long flags;
if (req->status != 0) {
pr_err("acc_complete_set_string, err %d\n", req->status);
@@ -371,22 +372,26 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
case ACCESSORY_STRING_SERIAL:
string_dest = dev->serial;
break;
+ default:
+ pr_err("unknown accessory string index %d\n",
+ dev->string_index);
+ return;
}
- if (string_dest) {
- unsigned long flags;
- if (length >= ACC_STRING_SIZE)
- length = ACC_STRING_SIZE - 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- memcpy(string_dest, req->buf, length);
- /* ensure zero termination */
- string_dest[length] = 0;
- spin_unlock_irqrestore(&dev->lock, flags);
- } else {
- pr_err("unknown accessory string index %d\n",
- dev->string_index);
+ if (!length) {
+ pr_debug("zero length for accessory string index %d\n",
+ dev->string_index);
+ return;
}
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 777acb489875..8536f10a2e35 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -183,15 +183,28 @@ static struct usb_qdss_opts *to_fi_usb_qdss_opts(struct usb_function_instance *f
}
/*----------------------------------------------------------------------*/
-static void qdss_ctrl_write_complete(struct usb_ep *ep,
+static void qdss_write_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct f_qdss *qdss = ep->driver_data;
struct qdss_request *d_req = req->context;
+ struct usb_ep *in;
+ struct list_head *list_pool;
+ enum qdss_state state;
unsigned long flags;
pr_debug("qdss_ctrl_write_complete\n");
+ if (qdss->debug_inface_enabled) {
+ in = qdss->port.ctrl_in;
+ list_pool = &qdss->ctrl_write_pool;
+ state = USB_QDSS_CTRL_WRITE_DONE;
+ } else {
+ in = qdss->port.data;
+ list_pool = &qdss->data_write_pool;
+ state = USB_QDSS_DATA_WRITE_DONE;
+ }
+
if (!req->status) {
/* send zlp */
if ((req->length >= ep->maxpacket) &&
@@ -199,13 +212,13 @@ static void qdss_ctrl_write_complete(struct usb_ep *ep,
req->length = 0;
d_req->actual = req->actual;
d_req->status = req->status;
- if (!usb_ep_queue(qdss->port.ctrl_in, req, GFP_ATOMIC))
+ if (!usb_ep_queue(in, req, GFP_ATOMIC))
return;
}
}
spin_lock_irqsave(&qdss->lock, flags);
- list_add_tail(&req->list, &qdss->ctrl_write_pool);
+ list_add_tail(&req->list, list_pool);
if (req->length != 0) {
d_req->actual = req->actual;
d_req->status = req->status;
@@ -213,8 +226,7 @@ static void qdss_ctrl_write_complete(struct usb_ep *ep,
spin_unlock_irqrestore(&qdss->lock, flags);
if (qdss->ch.notify)
- qdss->ch.notify(qdss->ch.priv, USB_QDSS_CTRL_WRITE_DONE, d_req,
- NULL);
+ qdss->ch.notify(qdss->ch.priv, state, d_req, NULL);
}
static void qdss_ctrl_read_complete(struct usb_ep *ep,
@@ -252,6 +264,12 @@ void usb_qdss_free_req(struct usb_qdss_ch *ch)
return;
}
+ list_for_each_safe(act, tmp, &qdss->data_write_pool) {
+ req = list_entry(act, struct usb_request, list);
+ list_del(&req->list);
+ usb_ep_free_request(qdss->port.data, req);
+ }
+
list_for_each_safe(act, tmp, &qdss->ctrl_write_pool) {
req = list_entry(act, struct usb_request, list);
list_del(&req->list);
@@ -271,23 +289,41 @@ int usb_qdss_alloc_req(struct usb_qdss_ch *ch, int no_write_buf,
{
struct f_qdss *qdss = ch->priv_usb;
struct usb_request *req;
+ struct usb_ep *in;
+ struct list_head *list_pool;
int i;
pr_debug("usb_qdss_alloc_req\n");
- if (no_write_buf <= 0 || no_read_buf <= 0 || !qdss) {
+ if (!qdss) {
+ pr_err("usb_qdss_alloc_req: channel %s closed\n", ch->name);
+ return -ENODEV;
+ }
+
+ if ((qdss->debug_inface_enabled &&
+ (no_write_buf <= 0 || no_read_buf <= 0)) ||
+ (!qdss->debug_inface_enabled &&
+ (no_write_buf <= 0 || no_read_buf))) {
pr_err("usb_qdss_alloc_req: missing params\n");
return -ENODEV;
}
+ if (qdss->debug_inface_enabled) {
+ in = qdss->port.ctrl_in;
+ list_pool = &qdss->ctrl_write_pool;
+ } else {
+ in = qdss->port.data;
+ list_pool = &qdss->data_write_pool;
+ }
+
for (i = 0; i < no_write_buf; i++) {
- req = usb_ep_alloc_request(qdss->port.ctrl_in, GFP_ATOMIC);
+ req = usb_ep_alloc_request(in, GFP_ATOMIC);
if (!req) {
pr_err("usb_qdss_alloc_req: ctrl_in allocation err\n");
goto fail;
}
- req->complete = qdss_ctrl_write_complete;
- list_add_tail(&req->list, &qdss->ctrl_write_pool);
+ req->complete = qdss_write_complete;
+ list_add_tail(&req->list, list_pool);
}
for (i = 0; i < no_read_buf; i++) {
@@ -479,21 +515,20 @@ static void usb_qdss_disconnect_work(struct work_struct *work)
qdss = container_of(work, struct f_qdss, disconnect_w);
pr_debug("usb_qdss_disconnect_work\n");
- /*
- * Uninitialized init data i.e. ep specific operation.
- * Notify qdss to cancel all active transfers.
- */
- if (qdss->ch.app_conn) {
+
+ /* Notify qdss to cancel all active transfers */
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv,
+ USB_QDSS_DISCONNECT,
+ NULL,
+ NULL);
+
+ /* Uninitialized init data i.e. ep specific operation */
+ if (qdss->ch.app_conn && !strcmp(qdss->ch.name, USB_QDSS_CH_MSM)) {
status = uninit_data(qdss->port.data);
if (status)
pr_err("%s: uninit_data error\n", __func__);
- if (qdss->ch.notify)
- qdss->ch.notify(qdss->ch.priv,
- USB_QDSS_DISCONNECT,
- NULL,
- NULL);
-
status = set_qdss_data_connection(qdss, 0);
if (status)
pr_err("qdss_disconnect error");
@@ -550,15 +585,16 @@ static void usb_qdss_connect_work(struct work_struct *work)
}
pr_debug("usb_qdss_connect_work\n");
+
+ if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM))
+ goto notify;
+
status = set_qdss_data_connection(qdss, 1);
if (status) {
pr_err("set_qdss_data_connection error(%d)", status);
return;
}
- if (qdss->ch.notify)
- qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
- NULL, &qdss->ch);
spin_lock_irqsave(&qdss->lock, flags);
req = qdss->endless_req;
spin_unlock_irqrestore(&qdss->lock, flags);
@@ -566,8 +602,15 @@ static void usb_qdss_connect_work(struct work_struct *work)
return;
status = usb_ep_queue(qdss->port.data, req, GFP_ATOMIC);
- if (status)
+ if (status) {
pr_err("%s: usb_ep_queue error (%d)\n", __func__, status);
+ return;
+ }
+
+notify:
+ if (qdss->ch.notify)
+ qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
+ NULL, &qdss->ch);
}
static int qdss_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
@@ -706,6 +749,7 @@ static struct f_qdss *alloc_usb_qdss(char *channel_name)
spin_lock_init(&qdss->lock);
INIT_LIST_HEAD(&qdss->ctrl_read_pool);
INIT_LIST_HEAD(&qdss->ctrl_write_pool);
+ INIT_LIST_HEAD(&qdss->data_write_pool);
INIT_WORK(&qdss->connect_w, usb_qdss_connect_work);
INIT_WORK(&qdss->disconnect_w, usb_qdss_disconnect_work);
@@ -801,6 +845,50 @@ int usb_qdss_ctrl_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
}
EXPORT_SYMBOL(usb_qdss_ctrl_write);
+int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+ struct f_qdss *qdss = ch->priv_usb;
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ pr_debug("usb_qdss_ctrl_write\n");
+
+ if (!qdss)
+ return -ENODEV;
+
+ spin_lock_irqsave(&qdss->lock, flags);
+
+ if (qdss->usb_connected == 0) {
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EIO;
+ }
+
+ if (list_empty(&qdss->data_write_pool)) {
+ pr_err("error: usb_qdss_data_write list is empty\n");
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ return -EAGAIN;
+ }
+
+ req = list_first_entry(&qdss->data_write_pool, struct usb_request,
+ list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+
+ req->buf = d_req->buf;
+ req->length = d_req->length;
+ req->context = d_req;
+ if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
+ spin_lock_irqsave(&qdss->lock, flags);
+ list_add_tail(&req->list, &qdss->data_write_pool);
+ spin_unlock_irqrestore(&qdss->lock, flags);
+ pr_err("qdss usb_ep_queue failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_qdss_write);
+
struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv,
void (*notify)(void *, unsigned, struct qdss_request *,
struct usb_qdss_ch *))
@@ -859,7 +947,9 @@ void usb_qdss_close(struct usb_qdss_ch *ch)
pr_debug("usb_qdss_close\n");
spin_lock_irqsave(&qdss_lock, flags);
- if (!qdss || !qdss->usb_connected) {
+ ch->priv_usb = NULL;
+ if (!qdss || !qdss->usb_connected ||
+ !strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
ch->app_conn = 0;
spin_unlock_irqrestore(&qdss_lock, flags);
return;
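A rough sketch of a hypothetical client of the new SW-path write API; usb_qdss_write() and USB_QDSS_DATA_WRITE_DONE come from the change above, while the callback and helper names are invented:

static void my_qdss_notify(void *priv, unsigned event,
			   struct qdss_request *d_req,
			   struct usb_qdss_ch *ch)
{
	if (event == USB_QDSS_DATA_WRITE_DONE)
		pr_debug("qdss wrote %d of %d bytes\n",
			 d_req->actual, d_req->length);
}

static int my_qdss_send(struct usb_qdss_ch *ch, struct qdss_request *d_req,
			void *buf, int len)
{
	/* d_req and buf must stay valid until USB_QDSS_DATA_WRITE_DONE. */
	d_req->buf = buf;
	d_req->length = len;

	return usb_qdss_write(ch, d_req);
}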
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index cad0f4cc06f9..fb7c01c0f939 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -53,6 +53,10 @@ struct f_qdss {
struct usb_qdss_ch ch;
struct list_head ctrl_read_pool;
struct list_head ctrl_write_pool;
+
+ /* for mdm channel SW path */
+ struct list_head data_write_pool;
+
struct work_struct connect_w;
struct work_struct disconnect_w;
spinlock_t lock;
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 03aeec2e878c..0cbe95304417 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -182,7 +182,7 @@ static void *usbpd_ipc_log;
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
-#define SWAP_SOURCE_START_TIME 20
+#define FIRST_SOURCE_CAP_TIME 200
#define VDM_BUSY_TIME 50
#define VCONN_ON_TIME 100
@@ -790,17 +790,27 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->pd_phy_opened = true;
}
- pd->current_state = PE_SRC_SEND_CAPABILITIES;
if (pd->in_pr_swap) {
- kick_sm(pd, SWAP_SOURCE_START_TIME);
pd->in_pr_swap = false;
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PR_SWAP, &val);
- break;
}
- /* fall-through */
+ /*
+ * A sink might remove its terminations (during some Type-C
+ * compliance tests or a sink attempting to do Try.SRC)
+ * at this point just after we enabled VBUS. Sending PD
+ * messages now would delay detecting the detach beyond the
+ * required timing. Instead, delay sending out the first
+ * source capabilities to allow for the other side to
+ * completely settle CC debounce and allow HW to detect detach
+ * sooner in the meantime. PD spec allows up to
+ * tFirstSourceCap (250ms).
+ */
+ pd->current_state = PE_SRC_SEND_CAPABILITIES;
+ kick_sm(pd, FIRST_SOURCE_CAP_TIME);
+ break;
case PE_SRC_SEND_CAPABILITIES:
kick_sm(pd, 0);
@@ -910,6 +920,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
+ pd->psy_type == POWER_SUPPLY_TYPE_USB_FLOAT ||
usb_compliance_mode)
start_usb_peripheral(pd);
}
@@ -1691,7 +1702,6 @@ static void usbpd_sm(struct work_struct *w)
if (pd->current_pr == PR_SINK) {
usbpd_set_state(pd, PE_SNK_STARTUP);
} else if (pd->current_pr == PR_SRC) {
- enable_vbus(pd);
if (!pd->vconn_enabled &&
pd->typec_mode ==
POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) {
@@ -1701,6 +1711,7 @@ static void usbpd_sm(struct work_struct *w)
else
pd->vconn_enabled = true;
}
+ enable_vbus(pd);
usbpd_set_state(pd, PE_SRC_STARTUP);
}
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index de4f93afdc97..e5f38e42e165 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -652,13 +652,16 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
writel_relaxed(intr_mask,
qphy->base + QUSB2PHY_INTR_CTRL);
- /* enable phy auto-resume */
- writel_relaxed(0x91,
+ if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+
+ /* enable phy auto-resume */
+ writel_relaxed(0x91,
qphy->base + QUSB2PHY_TEST1);
- /* flush the previous write before next write */
- wmb();
- writel_relaxed(0x90,
- qphy->base + QUSB2PHY_TEST1);
+ /* flush the previous write before next write */
+ wmb();
+ writel_relaxed(0x90,
+ qphy->base + QUSB2PHY_TEST1);
+ }
dev_dbg(phy->dev, "%s: intr_mask = %x\n",
__func__, intr_mask);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 7fbe19d5279e..81b2b9f808b5 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -215,14 +215,19 @@ done:
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
{
+ int width;
+
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = USB_DT_HUB;
- desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
+
desc->bNbrPorts = VHCI_NPORTS;
- desc->u.hs.DeviceRemovable[0] = 0xff;
- desc->u.hs.DeviceRemovable[1] = 0xff;
+ BUILD_BUG_ON(VHCI_NPORTS > USB_MAXCHILDREN);
+ width = desc->bNbrPorts / 8 + 1;
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
+ memset(&desc->u.hs.DeviceRemovable[0], 0, width);
+ memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
}
static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
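A worked example of the sizing above, assuming VHCI_NPORTS is 8 and USB_DT_HUB_NONVAR_SIZE is 7 (its usual value):

/*
 * width       = 8 / 8 + 1 = 2 bytes
 * bDescLength = 7 + 2 * 2 = 11 bytes
 * DeviceRemovable[0..1]          = 0x00 (all ports removable)
 * following PortPwrCtrlMask bytes = 0xff (legacy field, all ones)
 */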
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 1a9f18b40be6..34e4b3ad8b92 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1163,6 +1163,10 @@ static int tce_iommu_attach_group(void *iommu_data,
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
iommu_group_id(iommu_group), iommu_group); */
table_group = iommu_group_get_iommudata(iommu_group);
+ if (!table_group) {
+ ret = -ENODEV;
+ goto unlock_exit;
+ }
if (tce_groups_attached(container) && (!table_group->ops ||
!table_group->ops->take_ownership ||
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
index eaacdd875747..4f7a4a1189f4 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2007, 2013-2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
@@ -39,6 +40,7 @@
#define MDP_PPP_MAX_BPP 4
#define MDP_PPP_DYNAMIC_FACTOR 3
#define MDP_PPP_MAX_READ_WRITE 3
+#define MDP_PPP_MAX_WIDTH 0xFFF
#define ENABLE_SOLID_FILL 0x2
#define DISABLE_SOLID_FILL 0x0
#define BLEND_LATENCY 3
@@ -152,6 +154,11 @@ int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
return -EINVAL;
}
+ if (img->width > MDP_PPP_MAX_WIDTH) {
+ pr_err("%s incorrect width %d\n", __func__, img->width);
+ return -EINVAL;
+ }
+
fb_data.flags = img->priv;
fb_data.memory_id = img->memory_id;
fb_data.offset = 0;
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index d6e8a213c215..5548f0f09f8a 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -547,6 +547,7 @@ struct mdss_data_type {
u32 sec_session_cnt;
wait_queue_head_t secure_waitq;
struct cx_ipeak_client *mdss_cx_ipeak;
+ struct mult_factor bus_throughput_factor;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.c b/drivers/video/fbdev/msm/mdss_dba_utils.c
index c6ff92ed1686..3330f8f62b78 100644
--- a/drivers/video/fbdev/msm/mdss_dba_utils.c
+++ b/drivers/video/fbdev/msm/mdss_dba_utils.c
@@ -576,6 +576,7 @@ int mdss_dba_utils_video_on(void *data, struct mdss_panel_info *pinfo)
video_cfg.h_pulse_width = pinfo->lcdc.h_pulse_width;
video_cfg.v_pulse_width = pinfo->lcdc.v_pulse_width;
video_cfg.pclk_khz = (unsigned long)pinfo->clk_rate / 1000;
+ video_cfg.hdmi_mode = !hdmi_edid_is_dvi_mode(ud->edid_data);
/* Calculate number of DSI lanes configured */
video_cfg.num_of_input_lanes = 0;
@@ -591,8 +592,6 @@ int mdss_dba_utils_video_on(void *data, struct mdss_panel_info *pinfo)
/* Get scan information from EDID */
video_cfg.vic = mdss_dba_get_vic_panel_info(ud, pinfo);
ud->current_vic = video_cfg.vic;
- video_cfg.hdmi_mode = hdmi_edid_get_sink_mode(ud->edid_data,
- video_cfg.vic);
video_cfg.scaninfo = hdmi_edid_get_sink_scaninfo(ud->edid_data,
video_cfg.vic);
if (ud->ops.video_on)
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index c8afb3a244c3..a98e5a9007bd 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -1094,8 +1094,14 @@ static int dp_audio_info_setup(struct platform_device *pdev,
struct mdss_dp_drv_pdata *dp_ctrl = platform_get_drvdata(pdev);
if (!dp_ctrl || !params) {
- DEV_ERR("%s: invalid input\n", __func__);
- return -ENODEV;
+ pr_err("invalid input\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ if (!dp_ctrl->power_on) {
+ pr_debug("DP is already power off\n");
+ goto end;
}
mdss_dp_audio_setup_sdps(&dp_ctrl->ctrl_io, params->num_of_channels);
@@ -1103,6 +1109,7 @@ static int dp_audio_info_setup(struct platform_device *pdev,
mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt);
mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true);
+end:
return rc;
} /* dp_audio_info_setup */
@@ -1134,6 +1141,11 @@ static void dp_audio_teardown_done(struct platform_device *pdev)
return;
}
+ if (!dp->power_on) {
+ pr_err("DP is already power off\n");
+ return;
+ }
+
mdss_dp_audio_enable(&dp->ctrl_io, false);
/* Make sure the DP audio engine is disabled */
wmb();
@@ -2176,6 +2188,10 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
ret = mdss_dp_dpcd_cap_read(dp);
if (ret || !mdss_dp_aux_is_link_rate_valid(dp->dpcd.max_link_rate) ||
!mdss_dp_aux_is_lane_count_valid(dp->dpcd.max_lane_count)) {
+ if (ret == EDP_AUX_ERR_TOUT) {
+ pr_err("DPCD read timedout, skip connect notification\n");
+ goto end;
+ }
/*
* If there is an error in parsing DPCD or if DPCD reports
* unsupported link parameters then set the default link
@@ -3978,12 +3994,6 @@ static int mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
- /* In case of HPD_IRQ events without DP link being turned on such as
- * adb shell stop, skip handling hpd_irq event.
- */
- if (!dp->dp_initialized)
- goto exit;
-
pr_debug("start\n");
dp->hpd_irq_on = true;
@@ -4099,6 +4109,15 @@ static void mdss_dp_process_attention(struct mdss_dp_drv_pdata *dp_drv)
if (dp_drv->alt_mode.dp_status.hpd_irq) {
pr_debug("Attention: hpd_irq high\n");
+ /* In case of HPD_IRQ events without DP link being
+ * turned on such as adb shell stop, skip handling
+ * hpd_irq event.
+ */
+ if (!dp_drv->dp_initialized) {
+ pr_err("DP not initialized yet\n");
+ return;
+ }
+
if (dp_is_hdcp_enabled(dp_drv) && dp_drv->hdcp.ops->cp_irq) {
if (!dp_drv->hdcp.ops->cp_irq(dp_drv->hdcp.data))
return;
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 23a63121b78c..786fe10055da 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -395,6 +395,7 @@ static int dp_aux_rw_cmds_retry(struct mdss_dp_drv_pdata *dp,
int i;
u32 aux_cfg1_config_count;
int ret;
+ bool connected = false;
aux_cfg1_config_count = mdss_dp_phy_aux_get_config_cnt(dp,
PHY_AUX_CFG1);
@@ -404,6 +405,15 @@ retry:
do {
struct edp_cmd cmd1 = *cmd;
+ mutex_lock(&dp->attention_lock);
+ connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+
+ if (!connected) {
+ pr_err("dp cable disconnected\n");
+ break;
+ }
+
dp->aux_error_num = EDP_AUX_ERR_NONE;
pr_debug("Trying %s, iteration count: %d\n",
mdss_dp_aux_transaction_to_string(transaction),
diff --git a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
index 5a677dfe7484..36a062dc5207 100644
--- a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
@@ -894,22 +894,24 @@ static bool dp_hdcp2p2_supported(struct dp_hdcp2p2_ctrl *ctrl)
{
struct edp_cmd cmd = {0};
const u32 offset = 0x6921d;
- u8 buf;
+ u8 buf[3];
cmd.read = 1;
cmd.addr = offset;
- cmd.len = sizeof(buf);
- cmd.out_buf = &buf;
+ cmd.len = ARRAY_SIZE(buf);
+ cmd.out_buf = buf;
if (dp_aux_read(ctrl->init_data.cb_data, &cmd)) {
pr_err("RxCaps read failed\n");
goto error;
}
- pr_debug("rxcaps 0x%x\n", buf);
+ pr_debug("HDCP_CAPABLE=%lu\n", (buf[2] & BIT(1)) >> 1);
+ pr_debug("VERSION=%d\n", buf[0]);
- if (buf & BIT(1))
+ if ((buf[2] & BIT(1)) && (buf[0] == 0x2))
return true;
+
error:
return false;
}
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 82f6d4a123b5..4ac10ab494e5 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -909,10 +909,15 @@ static ssize_t mdss_dsi_cmd_write(struct file *file, const char __user *p,
/* Writing in batches is possible */
ret = simple_write_to_buffer(string_buf, blen, ppos, p, count);
+ if (ret < 0) {
+ pr_err("%s: Failed to copy data\n", __func__);
+ mutex_unlock(&pcmds->dbg_mutex);
+ return -EINVAL;
+ }
- string_buf[blen] = '\0';
+ string_buf[ret] = '\0';
pcmds->string_buf = string_buf;
- pcmds->sblen = blen;
+ pcmds->sblen = count;
mutex_unlock(&pcmds->dbg_mutex);
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index a953dd1a2ac2..abd2192997e5 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -62,6 +62,11 @@
#define EDID_VENDOR_ID_SIZE 4
#define EDID_IEEE_REG_ID 0x0c03
+enum edid_sink_mode {
+ SINK_MODE_DVI,
+ SINK_MODE_HDMI
+};
+
enum luminance_value {
NO_LUMINANCE_DATA = 3,
MAXIMUM_LUMINANCE = 4,
@@ -2418,7 +2423,7 @@ end:
return scaninfo;
} /* hdmi_edid_get_sink_scaninfo */
-u32 hdmi_edid_get_sink_mode(void *input, u32 mode)
+static u32 hdmi_edid_get_sink_mode(void *input)
{
struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
bool sink_mode;
@@ -2431,13 +2436,8 @@ u32 hdmi_edid_get_sink_mode(void *input, u32 mode)
if (edid_ctrl->edid_override &&
(edid_ctrl->override_data.sink_mode != -1))
sink_mode = edid_ctrl->override_data.sink_mode;
- else {
- if (edid_ctrl->sink_mode &&
- (mode > 0 && mode <= HDMI_EVFRMT_END))
- sink_mode = SINK_MODE_HDMI;
- else
- sink_mode = SINK_MODE_DVI;
- }
+ else
+ sink_mode = edid_ctrl->sink_mode;
return sink_mode;
} /* hdmi_edid_get_sink_mode */
@@ -2452,21 +2452,10 @@ u32 hdmi_edid_get_sink_mode(void *input, u32 mode)
*/
bool hdmi_edid_is_dvi_mode(void *input)
{
- struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
- int sink_mode;
-
- if (!edid_ctrl) {
- DEV_ERR("%s: invalid input\n", __func__);
- return true;
- }
-
- if (edid_ctrl->edid_override &&
- (edid_ctrl->override_data.sink_mode != -1))
- sink_mode = edid_ctrl->override_data.sink_mode;
+ if (hdmi_edid_get_sink_mode(input))
+ return false;
else
- sink_mode = edid_ctrl->sink_mode;
-
- return (sink_mode == SINK_MODE_DVI);
+ return true;
}
/**
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.h b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
index c604d0fbf7b2..af802bb45f89 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
@@ -58,16 +58,10 @@ struct hdmi_edid_override_data {
int vic;
};
-enum edid_sink_mode {
- SINK_MODE_DVI,
- SINK_MODE_HDMI
-};
-
int hdmi_edid_parser(void *edid_ctrl);
u32 hdmi_edid_get_raw_data(void *edid_ctrl, u8 *buf, u32 size);
u8 hdmi_edid_get_sink_scaninfo(void *edid_ctrl, u32 resolution);
bool hdmi_edid_is_dvi_mode(void *input);
-u32 hdmi_edid_get_sink_mode(void *edid_ctrl, u32 mode);
bool hdmi_edid_sink_scramble_override(void *input);
bool hdmi_edid_get_sink_scrambler_support(void *input);
bool hdmi_edid_get_scdc_support(void *input);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 2e267f2695d7..9ee0c27b225e 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -375,12 +375,11 @@ static void hdmi_tx_audio_setup(struct hdmi_tx_ctrl *hdmi_ctrl)
}
}
-static inline bool hdmi_tx_is_dvi_mode(struct hdmi_tx_ctrl *hdmi_ctrl)
+static inline u32 hdmi_tx_is_dvi_mode(struct hdmi_tx_ctrl *hdmi_ctrl)
{
void *data = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
- return (hdmi_edid_get_sink_mode(data,
- hdmi_ctrl->vic) == SINK_MODE_DVI);
+ return hdmi_edid_is_dvi_mode(data);
} /* hdmi_tx_is_dvi_mode */
static inline u32 hdmi_tx_is_in_splash(struct hdmi_tx_ctrl *hdmi_ctrl)
@@ -2482,8 +2481,7 @@ static void hdmi_tx_set_mode(struct hdmi_tx_ctrl *hdmi_ctrl, u32 power_on)
hdmi_ctrl_reg |= BIT(2);
/* Set transmission mode to DVI based in EDID info */
- if (hdmi_edid_get_sink_mode(data,
- hdmi_ctrl->vic) == SINK_MODE_DVI)
+ if (hdmi_edid_is_dvi_mode(data))
hdmi_ctrl_reg &= ~BIT(1); /* DVI mode */
/*
@@ -2942,6 +2940,7 @@ static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
{
int rc = 0;
struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+ u32 is_mode_dvi;
if (!hdmi_ctrl || !params) {
DEV_ERR("%s: invalid input\n", __func__);
@@ -2950,8 +2949,9 @@ static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
mutex_lock(&hdmi_ctrl->tx_lock);
- if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
- hdmi_tx_is_panel_on(hdmi_ctrl)) {
+ is_mode_dvi = hdmi_tx_is_dvi_mode(hdmi_ctrl);
+
+ if (!is_mode_dvi && hdmi_tx_is_panel_on(hdmi_ctrl)) {
memcpy(&hdmi_ctrl->audio_params, params,
sizeof(struct msm_ext_disp_audio_setup_params));
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 6936c4c1f3cc..6fb32761a767 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -4527,6 +4527,15 @@ static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
&mdata->clk_factor);
+ /*
+ * Bus throughput factor will be used during high downscale cases.
+ * The recommended default factor is 1.1.
+ */
+ mdata->bus_throughput_factor.numer = 11;
+ mdata->bus_throughput_factor.denom = 10;
+ mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-bus-througput-factor",
+ &mdata->bus_throughput_factor);
+
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
if (rc)
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index efd681a5d954..54fb21a5f35d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -31,6 +31,7 @@
#define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2
#define NUM_MIXERCFG_REGS 3
#define MDSS_MDP_WB_OUTPUT_BPP 3
+#define MIN_BUS_THROUGHPUT_SCALE_FACTOR 35
struct mdss_mdp_mixer_cfg {
u32 config_masks[NUM_MIXERCFG_REGS];
bool border_enabled;
@@ -622,11 +623,21 @@ static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
}
static inline bool __is_vert_downscaling(u32 src_h,
- struct mdss_rect dst){
-
+ struct mdss_rect dst)
+{
return (src_h > dst.h);
}
+static inline bool __is_bus_throughput_factor_required(u32 src_h,
+ struct mdss_rect dst)
+{
+ u32 scale_factor = src_h * 10;
+
+ do_div(scale_factor, dst.h);
+ return (__is_vert_downscaling(src_h, dst) &&
+ (scale_factor >= MIN_BUS_THROUGHPUT_SCALE_FACTOR));
+}
+
static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
struct mdss_rect src, struct mdss_rect dst,
u32 fps, u32 v_total, u32 flags)
@@ -673,10 +684,18 @@ static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
}
}
+ /*
+ * If the downscale factor is >= 3.5 for a 32 BPP surface,
+ * it is recommended to add a 10% bus throughput factor to
+ * the clock rate.
+ */
+ if ((pipe->src_fmt->bpp == 4) &&
+ __is_bus_throughput_factor_required(src_h, dst))
+ rate = apply_fudge_factor(rate, &mdata->bus_throughput_factor);
+
if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
rate = mdss_mdp_clk_fudge_factor(mixer, rate);
- rate = min(mdata->max_mdp_clk_rate, rate);
return rate;
}
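A worked example of the new threshold, with illustrative numbers:

/*
 * src_h = 2160, dst.h = 600:
 *   scale_factor = 2160 * 10 / 600 = 36  (>= MIN_BUS_THROUGHPUT_SCALE_FACTOR)
 * so for a 4-byte-per-pixel surface the pipe clock becomes
 *   rate = rate * 11 / 10   (the default 1.1 bus_throughput_factor)
 */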
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 1035d23fe9ce..09d1dab0d180 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -1863,12 +1863,14 @@ static void __print_buf(struct seq_file *s, struct mdss_mdp_data *buf,
seq_puts(s, "\n");
}
-static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
+static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe,
+ struct msm_fb_data_type *mfd)
{
struct mdss_mdp_data *buf;
int format;
int smps[4];
int i;
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%16llx play_cnt=%u xin_id=%d\n",
pipe->num, mdss_mdp_pipetype2str(pipe->type),
@@ -1923,11 +1925,14 @@ static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
seq_puts(s, "Data:\n");
+ mutex_lock(&mdp5_data->list_lock);
list_for_each_entry(buf, &pipe->buf_queue, pipe_list)
__print_buf(s, buf, false);
+ mutex_unlock(&mdp5_data->list_lock);
}
-static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
+static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer,
+ struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe;
int i, cnt = 0;
@@ -1944,7 +1949,7 @@ static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
for (i = 0; i < ARRAY_SIZE(mixer->stage_pipe); i++) {
pipe = mixer->stage_pipe[i];
if (pipe) {
- __dump_pipe(s, pipe);
+ __dump_pipe(s, pipe, mfd);
cnt++;
}
}
@@ -2019,8 +2024,8 @@ static void __dump_ctl(struct seq_file *s, struct mdss_mdp_ctl *ctl)
seq_printf(s, "Play Count=%u Underrun Count=%u\n",
ctl->play_cnt, ctl->underrun_cnt);
- __dump_mixer(s, ctl->mixer_left);
- __dump_mixer(s, ctl->mixer_right);
+ __dump_mixer(s, ctl->mixer_left, ctl->mfd);
+ __dump_mixer(s, ctl->mixer_right, ctl->mfd);
}
static int __dump_mdp(struct seq_file *s, struct mdss_data_type *mdata)
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index c31c0149866a..bdf6705ef597 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -643,8 +643,13 @@ static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
den_polarity = 0;
- hsync_polarity = p->h_polarity;
- vsync_polarity = p->v_polarity;
+ if (ctx->intf_type == MDSS_INTF_HDMI) {
+ hsync_polarity = p->h_polarity;
+ vsync_polarity = p->v_polarity;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
(vsync_polarity << 1) | /* VSYNC Polarity */
(hsync_polarity << 0); /* HSYNC Polarity */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index e44edb8fbea7..3ccc09d58480 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -3131,6 +3131,14 @@ int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
sync_pt_data = &mfd->mdp_sync_pt_data;
mutex_lock(&sync_pt_data->sync_mutex);
count = sync_pt_data->acq_fen_cnt;
+
+ if (count >= MDP_MAX_FENCE_FD) {
+ pr_err("Reached maximum possible value for fence count\n");
+ mutex_unlock(&sync_pt_data->sync_mutex);
+ rc = -EINVAL;
+ goto input_layer_err;
+ }
+
sync_pt_data->acq_fen[count] = fence;
sync_pt_data->acq_fen_cnt++;
mutex_unlock(&sync_pt_data->sync_mutex);
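
The guard added above prevents writing past the fixed-size acquire-fence array; a small self-contained model of the same bound check, with MDP_MAX_FENCE_FD assumed to be the array capacity:

#define MDP_MAX_FENCE_FD 32	/* assumed capacity, for illustration only */

struct fence_list_model {
	int acq_fen[MDP_MAX_FENCE_FD];
	int acq_fen_cnt;
};

/* Reject the new fence instead of overflowing acq_fen[]. */
static int add_acq_fence(struct fence_list_model *d, int fence)
{
	if (d->acq_fen_cnt >= MDP_MAX_FENCE_FD)
		return -1;
	d->acq_fen[d->acq_fen_cnt++] = fence;
	return 0;
}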
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 953aeb95332b..305fff6b5695 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -5023,12 +5023,15 @@ static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd,
break;
case metadata_op_get_ion_fd:
if (mfd->fb_ion_handle && mfd->fb_ion_client) {
+ get_dma_buf(mfd->fbmem_buf);
metadata->data.fbmem_ionfd =
ion_share_dma_buf_fd(mfd->fb_ion_client,
mfd->fb_ion_handle);
- if (metadata->data.fbmem_ionfd < 0)
+ if (metadata->data.fbmem_ionfd < 0) {
+ dma_buf_put(mfd->fbmem_buf);
pr_err("fd allocation failed. fd = %d\n",
metadata->data.fbmem_ionfd);
+ }
}
break;
case metadata_op_crc:
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
index 017a2f10dfbc..a5ec7097e0f6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -194,8 +194,12 @@ static int pp_hist_lut_cache_params_pipe_v1_7(struct mdp_hist_lut_data *config,
return -EINVAL;
}
- memcpy(&hist_lut_usr_config, config->cfg_payload,
- sizeof(struct mdp_hist_lut_data_v1_7));
+ if (copy_from_user(&hist_lut_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(hist_lut_usr_config))) {
+ pr_err("failed to copy hist lut config\n");
+ return -EFAULT;
+ }
hist_lut_cache_data = pipe->pp_res.hist_lut_cfg_payload;
if (!hist_lut_cache_data) {
@@ -606,8 +610,12 @@ static int pp_pcc_cache_params_pipe_v1_7(struct mdp_pcc_cfg_data *config,
return -EINVAL;
}
- memcpy(&v17_usr_config, config->cfg_payload,
- sizeof(v17_usr_config));
+ if (copy_from_user(&v17_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(v17_usr_config))) {
+ pr_err("failed to copy pcc config\n");
+ return -EFAULT;
+ }
if (!(config->ops & MDP_PP_OPS_WRITE)) {
pr_debug("write ops not set value of flag is %d\n",
@@ -861,8 +869,12 @@ static int pp_igc_lut_cache_params_pipe_v1_7(struct mdp_igc_lut_data *config,
goto igc_config_exit;
}
- memcpy(&v17_usr_config, config->cfg_payload,
- sizeof(v17_usr_config));
+ if (copy_from_user(&v17_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(v17_usr_config))) {
+ pr_err("failed to copy igc usr config\n");
+ return -EFAULT;
+ }
if (!(config->ops & MDP_PP_OPS_WRITE)) {
pr_debug("op for gamut %d\n", config->ops);
@@ -1272,8 +1284,12 @@ static int pp_pa_cache_params_pipe_v1_7(struct mdp_pa_v2_cfg_data *config,
return -EINVAL;
}
- memcpy(&pa_usr_config, config->cfg_payload,
- sizeof(struct mdp_pa_data_v1_7));
+ if (copy_from_user(&pa_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(pa_usr_config))) {
+ pr_err("failed to copy pa usr config\n");
+ return -EFAULT;
+ }
pa_cache_data = pipe->pp_res.pa_cfg_payload;
if (!pa_cache_data) {
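
The recurring change in this file replaces a plain memcpy of the user-supplied cfg_payload pointer with copy_from_user into a private kernel copy; a kernel-style sketch of that pattern (not buildable outside the kernel):

/* Copy the user buffer once, then validate and use only the kernel copy,
 * so user space cannot change the data after validation (no TOCTOU) and
 * a faulting pointer is reported as -EFAULT instead of crashing. */
struct mdp_pa_data_v1_7 pa_usr_config;

if (copy_from_user(&pa_usr_config,
		(void __user *) config->cfg_payload,
		sizeof(pa_usr_config)))
	return -EFAULT;
/* from here on, only pa_usr_config (kernel memory) is dereferenced */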
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
index 71cab148e1c3..aabf7c507376 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -358,7 +358,8 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
int ret = 0, i = 0;
char __iomem *hist_addr;
u32 sz = 0, temp = 0, *data = NULL;
- struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+ struct mdp_hist_lut_data_v1_7 lut_data_v1_7;
+ struct mdp_hist_lut_data_v1_7 *lut_data = &lut_data_v1_7;
struct mdp_hist_lut_data *lut_cfg_data = NULL;
if (!base_addr || !cfg_data) {
@@ -378,7 +379,11 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
lut_cfg_data->version, lut_cfg_data->cfg_payload);
return -EINVAL;
}
- lut_data = lut_cfg_data->cfg_payload;
+ if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+ sizeof(*lut_data))) {
+ pr_err("copy from user failed for lut_data\n");
+ return -EFAULT;
+ }
if (lut_data->len != ENHIST_LUT_ENTRIES) {
pr_err("invalid hist_lut len %d", lut_data->len);
return -EINVAL;
@@ -1786,7 +1791,8 @@ static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
{
int ret = 0, i = 0;
struct mdp_igc_lut_data *lut_cfg_data = NULL;
- struct mdp_igc_lut_data_v1_7 *lut_data = NULL;
+ struct mdp_igc_lut_data_v1_7 lut_data_v1_7;
+ struct mdp_igc_lut_data_v1_7 *lut_data = &lut_data_v1_7;
char __iomem *c1 = NULL, *c2 = NULL;
u32 *c0c1_data = NULL, *c2_data = NULL;
u32 data = 0, sz = 0;
@@ -1810,7 +1816,11 @@ static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
ret = -EINVAL;
goto exit;
}
- lut_data = lut_cfg_data->cfg_payload;
+ if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+ sizeof(*lut_data))) {
+ pr_err("copy from user failed for lut_data\n");
+ return -EFAULT;
+ }
if (lut_data->len != IGC_LUT_ENTRIES) {
pr_err("invalid lut len %d\n", lut_data->len);
ret = -EINVAL;
@@ -1954,20 +1964,24 @@ static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
u32 *c0_data = NULL, *c1_data = NULL, *c2_data = NULL;
u32 val = 0, i = 0, sz = 0;
struct mdp_pgc_lut_data *pgc_data = NULL;
- struct mdp_pgc_lut_data_v1_7 *pgc_data_v17 = NULL;
+ struct mdp_pgc_lut_data_v1_7 pgc_lut_data_v17;
+ struct mdp_pgc_lut_data_v1_7 *pgc_data_v17 = &pgc_lut_data_v17;
if (!base_addr || !cfg_data) {
pr_err("invalid params base_addr %pK cfg_data %pK block_type %d\n",
base_addr, cfg_data, block_type);
return -EINVAL;
}
pgc_data = (struct mdp_pgc_lut_data *) cfg_data;
- pgc_data_v17 = (struct mdp_pgc_lut_data_v1_7 *)
- pgc_data->cfg_payload;
- if (pgc_data->version != mdp_pgc_v1_7 || !pgc_data_v17) {
+ if (pgc_data->version != mdp_pgc_v1_7 || !pgc_data->cfg_payload) {
pr_err("invalid pgc version %d payload %pK\n",
- pgc_data->version, pgc_data_v17);
+ pgc_data->version, pgc_data->cfg_payload);
return -EINVAL;
}
+ if (copy_from_user(pgc_data_v17, (void __user *) pgc_data->cfg_payload,
+ sizeof(*pgc_data_v17))) {
+ pr_err("copy from user failed for pgc lut data\n");
+ return -EFAULT;
+ }
if (!(pgc_data->flags & MDP_PP_OPS_READ)) {
pr_info("read ops is not set %d", pgc_data->flags);
return -EINVAL;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
index 25cb94f89dd5..b377f0921508 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -298,7 +298,8 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
int ret = 0, i = 0;
char __iomem *hist_lut_addr;
u32 sz = 0, temp = 0, *data = NULL;
- struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+ struct mdp_hist_lut_data_v1_7 lut_data_v1_7;
+ struct mdp_hist_lut_data_v1_7 *lut_data = &lut_data_v1_7;
struct mdp_hist_lut_data *lut_cfg_data = NULL;
if (!base_addr || !cfg_data) {
@@ -323,7 +324,11 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
lut_cfg_data->version, lut_cfg_data->cfg_payload);
return -EINVAL;
}
- lut_data = lut_cfg_data->cfg_payload;
+ if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+ sizeof(*lut_data))) {
+ pr_err("copy from user failed for lut_data\n");
+ return -EFAULT;
+ }
if (lut_data->len != ENHIST_LUT_ENTRIES) {
pr_err("invalid hist_lut len %d", lut_data->len);
return -EINVAL;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
index d412a1c66e79..5df1ed7c89a5 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
@@ -175,7 +175,7 @@ static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 1);
if (ret) {
- pr_debug("mdss set attribute failed for early map\n");
+ pr_err("mdss set attribute failed for early map\n");
goto end;
}
@@ -198,6 +198,9 @@ static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
}
ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 0);
+ if (ret)
+ pr_err("mdss reset attribute failed for early map\n");
+
end:
mdata->handoff_pending = true;
diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c
index 61b0518d3ee6..2028222748c3 100644
--- a/drivers/video/fbdev/msm/mdss_rotator.c
+++ b/drivers/video/fbdev/msm/mdss_rotator.c
@@ -373,6 +373,15 @@ static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr,
return false;
}
+static void mdss_rotator_install_fence_fd(struct mdss_rot_entry_container *req)
+{
+ int i = 0;
+
+ for (i = 0; i < req->count; i++)
+ sync_fence_install(req->entries[i].output_fence,
+ req->entries[i].output_fence_fd);
+}
+
static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
{
int ret = 0, fd;
@@ -411,7 +420,6 @@ static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
goto get_fd_err;
}
- sync_fence_install(fence, fd);
rot_timeline->next_value++;
mutex_unlock(&rot_timeline->lock);
@@ -2240,6 +2248,7 @@ static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
goto handle_request_err1;
}
+ mdss_rotator_install_fence_fd(req);
mdss_rotator_queue_request(mgr, private, req);
mutex_unlock(&mgr->lock);
@@ -2400,6 +2409,7 @@ static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
goto handle_request32_err1;
}
+ mdss_rotator_install_fence_fd(req);
mdss_rotator_queue_request(mgr, private, req);
mutex_unlock(&mgr->lock);
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 80dbe83972d7..bb3b4b3fa929 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -2587,7 +2587,8 @@ int mdss_dsi_post_clkon_cb(void *priv,
}
if (clk & MDSS_DSI_LINK_CLK) {
/* toggle the resync FIFO every time the clock changes */

- if (ctrl->shared_data->phy_rev == DSI_PHY_REV_30)
+ if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_30) &&
+ !pdata->panel_info.cont_splash_enabled)
mdss_dsi_phy_v3_toggle_resync_fifo(ctrl);
if (ctrl->ulps) {
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index e0c98423f2c9..11a72bc2c71b 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
if (!wdt)
return -ENOMEM;
+ spin_lock_init(&wdt->lock);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->base = devm_ioremap_resource(dev, res);
if (IS_ERR(wdt->base))
@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
return ret;
}
- spin_lock_init(&wdt->lock);
platform_set_drvdata(pdev, wdt);
watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
bcm_kona_wdt_wdd.parent = &pdev->dev;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 7399782c0998..8a58bbc14de2 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr, map & ~PAGE_MASK, size, dir, attrs);
- dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sgl) = 0;
return 0;
}
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
attrs);
- sg->dma_address = xen_phys_to_bus(map);
+ sg->dma_address = dev_addr;
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
diff --git a/fs/Kconfig b/fs/Kconfig
index a5d2dc39ba07..4adb93ec85ea 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -282,4 +282,9 @@ endif # NETWORK_FILESYSTEMS
source "fs/nls/Kconfig"
source "fs/dlm/Kconfig"
+config FILE_TABLE_DEBUG
+ bool "Enable FILE_TABLE_DEBUG"
+ help
+ This option enables debugging of open files using a global file table
+
endmenu
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index ac7d921ed984..257425511d10 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -331,7 +331,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
int status;
token = (autofs_wqt_t) param->fail.token;
- status = param->fail.status ? param->fail.status : -ENOENT;
+ status = param->fail.status < 0 ? param->fail.status : -ENOENT;
return autofs4_wait_release(sbi, token, status);
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 0c52941dd62c..6c031dd1bc4e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2295,6 +2295,7 @@ static int elf_core_dump(struct coredump_params *cprm)
goto end_coredump;
}
}
+ dump_truncate(cprm);
if (!elf_core_write_extra_data(cprm))
goto end_coredump;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 863fa0f1972b..a61926cb01c0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4397,8 +4397,19 @@ search_again:
if (found_type > min_type) {
del_item = 1;
} else {
- if (item_end < new_size)
+ if (item_end < new_size) {
+ /*
+ * With NO_HOLES mode, for the following mapping
+ *
+ * [0-4k][hole][8k-12k]
+ *
+ * if truncating isize down to 6k, it ends up
+ * isize being 8k.
+ */
+ if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ last_size = new_size;
break;
+ }
if (found_key.offset >= new_size)
del_item = 1;
else
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 87b87e091e8e..efd72e1fae74 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid, __u16 search_flags,
struct cifs_search_info *srch_inf)
{
- return CIFSFindFirst(xid, tcon, path, cifs_sb,
- &fid->netfid, search_flags, srch_inf, true);
+ int rc;
+
+ rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+ &fid->netfid, search_flags, srch_inf, true);
+ if (rc)
+ cifs_dbg(FYI, "find first failed=%d\n", rc);
+ return rc;
}
static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 087918c4612a..1d125d3d0d89 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -909,7 +909,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
kfree(utf16_path);
if (rc) {
- cifs_dbg(VFS, "open dir failed\n");
+ cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
return rc;
}
@@ -919,7 +919,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
if (rc) {
- cifs_dbg(VFS, "query directory failed\n");
+ cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
return rc;
diff --git a/fs/coredump.c b/fs/coredump.c
index fe0a28da18a6..2ce5ef429c48 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -810,3 +810,21 @@ int dump_align(struct coredump_params *cprm, int align)
return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that file size is big enough to contain the current file
+ * position. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+ struct file *file = cprm->file;
+ loff_t offset;
+
+ if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+ offset = file->f_op->llseek(file, 0, SEEK_CUR);
+ if (i_size_read(file->f_mapping->host) < offset)
+ do_truncate(file->f_path.dentry, offset, 0, file);
+ }
+}
+EXPORT_SYMBOL(dump_truncate);
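
For intuition about dump_truncate(), a small user-space analogue: seeking past EOF alone does not grow a file, so a final truncate to the current offset is what makes readers that trust the inode size (such as gdb) see the full core:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.core", O_RDWR | O_CREAT | O_TRUNC, 0600);
	struct stat st;

	if (fd < 0)
		return 1;
	lseek(fd, 4096, SEEK_SET);		/* "skip" without writing */
	fstat(fd, &st);
	printf("size after seek: %lld\n", (long long)st.st_size);	/* 0 */
	ftruncate(fd, lseek(fd, 0, SEEK_CUR));	/* grow to current offset */
	fstat(fd, &st);
	printf("size after truncate: %lld\n", (long long)st.st_size);	/* 4096 */
	close(fd);
	return 0;
}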
diff --git a/fs/dcache.c b/fs/dcache.c
index 3a3b0f370304..4c07a84d1317 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1128,11 +1128,12 @@ void shrink_dcache_sb(struct super_block *sb)
LIST_HEAD(dispose);
freed = list_lru_walk(&sb->s_dentry_lru,
- dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+ dentry_lru_isolate_shrink, &dispose, 1024);
this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
- } while (freed > 0);
+ cond_resched();
+ } while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/exec.c b/fs/exec.c
index b1f5ddbf7d56..073ae12b396e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -206,8 +206,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+ unsigned long ptr_size;
struct rlimit *rlim;
+ /*
+ * Since the stack will hold pointers to the strings, we
+ * must account for them as well.
+ *
+ * The size calculation is the entire vma while each arg page is
+ * built, so each time we get here it's calculating how far it
+ * is currently (rather than each call being just the newly
+ * added size from the arg page). As a result, we need to
+ * always add the entire size of the pointers, so that on the
+ * last call to get_arg_page() we'll actually have the entire
+ * correct size.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (ptr_size > ULONG_MAX - size)
+ goto fail;
+ size += ptr_size;
+
acct_arg_size(bprm, size / PAGE_SIZE);
/*
@@ -225,13 +243,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
* to work from.
*/
rlim = current->signal->rlim;
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
- put_page(page);
- return NULL;
- }
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ goto fail;
}
return page;
+
+fail:
+ put_page(page);
+ return NULL;
}
static void put_arg_page(struct page *page)
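
The comment above explains why the argv/envp pointer slots are added to the stack-size estimate; a standalone sketch of the overflow-safe form of that check (names are illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

/* Account for one pointer slot per argument and environment string on
 * top of the string area, refusing the exec if the sum would wrap or
 * exceed a quarter of the stack limit. */
static bool arg_size_ok(unsigned long strings_size, unsigned long argc,
			unsigned long envc, unsigned long stack_limit)
{
	unsigned long ptr_size = (argc + envc) * sizeof(void *);

	if (ptr_size > ULONG_MAX - strings_size)
		return false;			/* addition would overflow */
	return strings_size + ptr_size <= stack_limit / 4;
}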
diff --git a/fs/file_table.c b/fs/file_table.c
index ad17e05ebf95..b4baa0de4988 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -41,6 +41,141 @@ static struct kmem_cache *filp_cachep __read_mostly;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
+#ifdef CONFIG_FILE_TABLE_DEBUG
+#include <linux/hashtable.h>
+#include <mount.h>
+static DEFINE_MUTEX(global_files_lock);
+static DEFINE_HASHTABLE(global_files_hashtable, 10);
+
+struct global_filetable_lookup_key {
+ struct work_struct work;
+ uintptr_t value;
+};
+
+void global_filetable_print_warning_once(void)
+{
+ pr_err_once("\n**********************************************************\n");
+ pr_err_once("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_err_once("** **\n");
+ pr_err_once("** VFS FILE TABLE DEBUG is enabled . **\n");
+ pr_err_once("** Allocating extra memory and slowing access to files **\n");
+ pr_err_once("** **\n");
+ pr_err_once("** This means that this is a DEBUG kernel and it is **\n");
+ pr_err_once("** unsafe for production use. **\n");
+ pr_err_once("** **\n");
+ pr_err_once("** If you see this message and you are not debugging **\n");
+ pr_err_once("** the kernel, report this immediately to your vendor! **\n");
+ pr_err_once("** **\n");
+ pr_err_once("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_err_once("**********************************************************\n");
+}
+
+void global_filetable_add(struct file *filp)
+{
+ struct mount *mnt;
+
+ if (filp->f_path.dentry->d_iname == NULL ||
+ strlen(filp->f_path.dentry->d_iname) == 0)
+ return;
+
+ mnt = real_mount(filp->f_path.mnt);
+
+ mutex_lock(&global_files_lock);
+ hash_add(global_files_hashtable, &filp->f_hash, (uintptr_t)mnt);
+ mutex_unlock(&global_files_lock);
+}
+
+void global_filetable_del(struct file *filp)
+{
+ mutex_lock(&global_files_lock);
+ hash_del(&filp->f_hash);
+ mutex_unlock(&global_files_lock);
+}
+
+static void global_print_file(struct file *filp, char *path_buffer, int *count)
+{
+ char *pathname;
+
+ pathname = d_path(&filp->f_path, path_buffer, PAGE_SIZE);
+ if (IS_ERR(pathname))
+ pr_err("VFS: File %d Address : %pa partial filename: %s ref_count=%ld\n",
+ ++(*count), &filp, filp->f_path.dentry->d_iname,
+ atomic_long_read(&filp->f_count));
+ else
+ pr_err("VFS: File %d Address : %pa full filepath: %s ref_count=%ld\n",
+ ++(*count), &filp, pathname,
+ atomic_long_read(&filp->f_count));
+}
+
+static void global_filetable_print(uintptr_t lookup_mnt)
+{
+ struct hlist_node *tmp;
+ struct file *filp;
+ struct mount *mnt;
+ int index;
+ int count = 0;
+ char *path_buffer = (char *)__get_free_page(GFP_TEMPORARY);
+
+ mutex_lock(&global_files_lock);
+ pr_err("\n**********************************************************\n");
+ pr_err("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+
+ pr_err("\n");
+ pr_err("VFS: The following files hold a reference to the mount\n");
+ pr_err("\n");
+ hash_for_each_possible_safe(global_files_hashtable, filp, tmp, f_hash,
+ lookup_mnt) {
+ mnt = real_mount(filp->f_path.mnt);
+ if ((uintptr_t)mnt == lookup_mnt)
+ global_print_file(filp, path_buffer, &count);
+ }
+ pr_err("\n");
+ pr_err("VFS: Found total of %d open files\n", count);
+ pr_err("\n");
+
+ count = 0;
+ pr_err("\n");
+ pr_err("VFS: The following files need to be cleaned up\n");
+ pr_err("\n");
+ hash_for_each_safe(global_files_hashtable, index, tmp, filp, f_hash) {
+ if (atomic_long_read(&filp->f_count) == 0)
+ global_print_file(filp, path_buffer, &count);
+ }
+
+ pr_err("\n");
+ pr_err("VFS: Found total of %d files awaiting clean-up\n", count);
+ pr_err("\n");
+ pr_err("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_err("\n**********************************************************\n");
+
+ mutex_unlock(&global_files_lock);
+ free_page((unsigned long)path_buffer);
+}
+
+static void global_filetable_print_work_fn(struct work_struct *work)
+{
+ struct global_filetable_lookup_key *key;
+ uintptr_t lookup_mnt;
+
+ key = container_of(work, struct global_filetable_lookup_key, work);
+ lookup_mnt = key->value;
+ kfree(key);
+ global_filetable_print(lookup_mnt);
+}
+
+void global_filetable_delayed_print(struct mount *mnt)
+{
+ struct global_filetable_lookup_key *key;
+
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (key == NULL)
+ return;
+ key->value = (uintptr_t)mnt;
+ INIT_WORK(&key->work, global_filetable_print_work_fn);
+ schedule_work(&key->work);
+}
+#endif /* CONFIG_FILE_TABLE_DEBUG */
+
static void file_free_rcu(struct rcu_head *head)
{
struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
@@ -219,6 +354,7 @@ static void __fput(struct file *file)
put_write_access(inode);
__mnt_drop_write(mnt);
}
+ global_filetable_del(file);
file->f_path.dentry = NULL;
file->f_path.mnt = NULL;
file->f_inode = NULL;
@@ -314,6 +450,7 @@ void __init files_init(void)
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+ global_filetable_print_warning_once();
}
/*
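
The debug table above keys every open struct file by the pointer value of its mount, so a failed (-EBUSY) unmount can list exactly the files pinning that mount; a kernel-style sketch of the same keying pattern with <linux/hashtable.h> (illustrative names, not the driver's):

#include <linux/hashtable.h>
#include <linux/printk.h>
#include <linux/types.h>

static DEFINE_HASHTABLE(owner_table, 10);	/* 2^10 buckets */

struct tracked {
	struct hlist_node node;
	void *owner;
};

static void track(struct tracked *t, void *owner)
{
	t->owner = owner;
	hash_add(owner_table, &t->node, (uintptr_t)owner);
}

static void dump_owner(void *owner)
{
	struct tracked *t;

	/* hash_for_each_possible() walks one bucket; colliding entries
	 * from other owners still have to be filtered by comparison. */
	hash_for_each_possible(owner_table, t, node, (uintptr_t)owner)
		if (t->owner == owner)
			pr_info("object %pK held by owner %pK\n", t, owner);
}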
diff --git a/fs/internal.h b/fs/internal.h
index 6387b35a1c0d..1b93a3929b16 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -153,3 +153,29 @@ extern void mnt_pin_kill(struct mount *m);
* fs/nsfs.c
*/
extern struct dentry_operations ns_dentry_operations;
+
+#ifdef CONFIG_FILE_TABLE_DEBUG
+void global_filetable_print_warning_once(void);
+void global_filetable_add(struct file *filp);
+void global_filetable_del(struct file *filp);
+void global_filetable_delayed_print(struct mount *mnt);
+
+#else /* i.e NOT CONFIG_FILE_TABLE_DEBUG */
+
+static inline void global_filetable_print_warning_once(void)
+{
+}
+
+static inline void global_filetable_add(struct file *filp)
+{
+}
+
+static inline void global_filetable_del(struct file *filp)
+{
+}
+
+static inline void global_filetable_delayed_print(struct mount *mnt)
+{
+}
+
+#endif /* CONFIG_FILE_TABLE_DEBUG */
diff --git a/fs/namei.c b/fs/namei.c
index 816b6e8e934e..b5dfe2c5b51e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3379,6 +3379,8 @@ out2:
error = -ESTALE;
}
file = ERR_PTR(error);
+ } else {
+ global_filetable_add(file);
}
return file;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index a22959c97384..0f52d90c356f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1595,6 +1595,8 @@ static int do_umount(struct mount *mnt, int flags)
}
unlock_mount_hash();
namespace_unlock();
+ if (retval == -EBUSY)
+ global_filetable_delayed_print(mnt);
return retval;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4e3679b25b9b..8e425f2c5ddd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2188,8 +2188,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
return 0;
- /* even though OPEN succeeded, access is denied. Close the file */
- nfs4_close_state(state, fmode);
return -EACCES;
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 709fbbd44c65..acebc350e98d 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -2070,13 +2070,13 @@ unlock:
spin_unlock(&o2hb_live_lock);
}
-static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
char *page)
{
return sprintf(page, "%u\n", o2hb_dead_threshold);
}
-static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
const char *page, size_t count)
{
unsigned long tmp;
@@ -2125,11 +2125,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item,
}
-CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
+CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
- &o2hb_heartbeat_group_attr_threshold,
+ &o2hb_heartbeat_group_attr_dead_threshold,
&o2hb_heartbeat_group_attr_mode,
NULL,
};
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2eb66decc5ab..b3b95e2ae2ff 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -121,11 +121,12 @@ static void squashfs_process_blocks(struct squashfs_read_request *req)
if (req->data_processing == SQUASHFS_METADATA) {
/* Extract the length of the metadata block */
- if (req->offset != msblk->devblksize - 1)
- length = *((u16 *)(bh[0]->b_data + req->offset));
- else {
- length = bh[0]->b_data[req->offset];
- length |= bh[1]->b_data[0] << 8;
+ if (req->offset != msblk->devblksize - 1) {
+ length = le16_to_cpup((__le16 *)
+ (bh[0]->b_data + req->offset));
+ } else {
+ length = (unsigned char)bh[0]->b_data[req->offset];
+ length |= (unsigned char)bh[1]->b_data[0] << 8;
}
req->compressed = SQUASHFS_COMPRESSED(length);
req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
@@ -211,8 +212,8 @@ static int bh_is_optional(struct squashfs_read_request *req, int idx)
int start_idx, end_idx;
struct squashfs_sb_info *msblk = req->sb->s_fs_info;
- start_idx = (idx * msblk->devblksize - req->offset) / PAGE_CACHE_SIZE;
- end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) / PAGE_CACHE_SIZE;
+ start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
+ end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
if (start_idx >= req->output->pages)
return 1;
if (start_idx < 0)
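
The squashfs fix above matters when the 16-bit little-endian length field straddles two buffers: each byte must be widened as unsigned before shifting. A standalone sketch of the boundary case:

#include <stdint.h>
#include <stdio.h>

/* On platforms where plain char is signed, a high byte >= 0x80 would
 * sign-extend and corrupt the assembled length without the casts. */
static unsigned int read_le16_split(const char *end_of_buf0,
				    const char *start_of_buf1)
{
	unsigned int length;

	length = (unsigned char)end_of_buf0[0];
	length |= (unsigned char)start_of_buf1[0] << 8;
	return length;
}

int main(void)
{
	char a[1] = { (char)0x34 };
	char b[1] = { (char)0x92 };

	printf("0x%x\n", read_le16_split(a, b));	/* prints 0x9234 */
	return 0;
}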
diff --git a/include/dt-bindings/clock/msm-clocks-8998.h b/include/dt-bindings/clock/msm-clocks-8998.h
index cd36374a04a7..67e47c46e09a 100644
--- a/include/dt-bindings/clock/msm-clocks-8998.h
+++ b/include/dt-bindings/clock/msm-clocks-8998.h
@@ -443,6 +443,11 @@
#define clk_dsi0pll_pclk_src 0x5efd85d4
#define clk_dsi0pll_pclk_src_mux 0x84b14663
#define clk_dsi0pll_post_bit_div 0xf46dcf27
+#define clk_dsi0pll_pll_out_div1 0xeda5b7fe
+#define clk_dsi0pll_pll_out_div2 0x97fa476d
+#define clk_dsi0pll_pll_out_div4 0x90a98ce0
+#define clk_dsi0pll_pll_out_div8 0x9d9d85cf
+#define clk_dsi0pll_pll_out_mux 0x179c27ca
#define clk_dsi0pll_post_vco_mux 0xfaf9bd1f
#define clk_dsi0pll_post_vco_div1 0xabb50b2a
#define clk_dsi0pll_post_vco_div4 0xbe51c091
@@ -455,6 +460,11 @@
#define clk_dsi1pll_pclk_src 0xeddcd80e
#define clk_dsi1pll_pclk_src_mux 0x3651feb3
#define clk_dsi1pll_post_bit_div 0x712f0260
+#define clk_dsi1pll_pll_out_div8 0x87628ddb
+#define clk_dsi1pll_pll_out_div4 0x0d9a384b
+#define clk_dsi1pll_pll_out_div2 0x0c9b5748
+#define clk_dsi1pll_pll_out_div1 0x3193164e
+#define clk_dsi1pll_pll_out_mux 0x171bf8fd
#define clk_dsi1pll_post_vco_mux 0xc6a90d20
#define clk_dsi1pll_post_vco_div1 0x6f47ca7d
#define clk_dsi1pll_post_vco_div4 0x90628974
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d016a121a8c4..28ffa94aed6b 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,6 +14,7 @@ struct coredump_params;
extern int dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
#else
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4b27be2038e3..df1171bada01 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -912,6 +912,10 @@ struct file {
struct list_head f_tfile_llink;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
+
+#ifdef CONFIG_FILE_TABLE_DEBUG
+ struct hlist_node f_hash;
+#endif /* #ifdef CONFIG_FILE_TABLE_DEBUG */
} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 2a6b9947aaa3..743b34f56f2b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@ struct list_lru_node {
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg *memcg_lrus;
#endif
+ long nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 5219df44cfec..ae8d475a9385 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -23,9 +23,13 @@ enum migrate_reason {
MR_SYSCALL, /* also applies to cpusets */
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
- MR_CMA
+ MR_CMA,
+ MR_TYPES
};
+/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
+extern char *migrate_reason_names[MR_TYPES];
+
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e599eb35c10e..7d0b5e7bcadb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -529,7 +529,6 @@ void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
-int split_free_page(struct page *page);
/*
* Compound pages have a destructor function. Provide a
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9d1161a8d6b7..2b1be7efde55 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -71,6 +71,9 @@ enum {
*/
extern int *get_migratetype_fallbacks(int mtype);
+/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
+extern char * const migratetype_names[MIGRATE_TYPES];
+
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 17f118a82854..03f2a3e7d76d 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
struct pglist_data;
struct page_ext_operations {
@@ -44,8 +45,8 @@ struct page_ext {
#ifdef CONFIG_PAGE_OWNER
unsigned int order;
gfp_t gfp_mask;
- unsigned int nr_entries;
- unsigned long trace_entries[8];
+ int last_migrate_reason;
+ depot_stack_handle_t handle;
#endif
};
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
static inline bool page_is_young(struct page *page)
{
- return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline void set_page_young(struct page *page)
{
- set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool test_and_clear_page_young(struct page *page)
{
- return test_and_clear_bit(PAGE_EXT_YOUNG,
- &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool page_is_idle(struct page *page)
{
- return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void set_page_idle(struct page *page)
{
- set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void clear_page_idle(struct page *page)
{
- clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
#endif /* CONFIG_64BIT */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index cacaabea8a09..30583ab0ffb1 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -1,38 +1,52 @@
#ifndef __LINUX_PAGE_OWNER_H
#define __LINUX_PAGE_OWNER_H
+#include <linux/jump_label.h>
+
#ifdef CONFIG_PAGE_OWNER
-extern bool page_owner_inited;
+extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
-extern gfp_t __get_page_owner_gfp(struct page *page);
+extern void __split_page_owner(struct page *page, unsigned int order);
+extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __dump_page_owner(struct page *page);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
- if (likely(!page_owner_inited))
- return;
-
- __reset_page_owner(page, order);
+ if (static_branch_unlikely(&page_owner_inited))
+ __reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
- if (likely(!page_owner_inited))
- return;
-
- __set_page_owner(page, order, gfp_mask);
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner(page, order, gfp_mask);
}
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page, unsigned int order)
{
- if (likely(!page_owner_inited))
- return 0;
-
- return __get_page_owner_gfp(page);
+ if (static_branch_unlikely(&page_owner_inited))
+ __split_page_owner(page, order);
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __copy_page_owner(oldpage, newpage);
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner_migrate_reason(page, reason);
+}
+static inline void dump_page_owner(struct page *page)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __dump_page_owner(page);
}
#else
static inline void reset_page_owner(struct page *page, unsigned int order)
@@ -42,10 +56,18 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page,
+ unsigned int order)
+{
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+}
+static inline void dump_page_owner(struct page *page)
{
- return 0;
}
-
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */
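
The page_owner rework above swaps a plain bool for a static key, so the disabled case costs only a patched-out branch at every allocation site; a kernel-style sketch of the pattern (the feature names here are hypothetical):

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(feature_enabled);

void __feature_hook_slowpath(void);	/* hypothetical out-of-line path */

static inline void feature_hook(void)
{
	/* Compiles to a no-op branch until the key is flipped at runtime. */
	if (static_branch_unlikely(&feature_enabled))
		__feature_hook_slowpath();
}

/* Flip once during init, e.g. after parsing a boot parameter:
 *	static_branch_enable(&feature_enabled);
 */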
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
new file mode 100644
index 000000000000..7978b3e2c1e1
--- /dev/null
+++ b/include/linux/stackdepot.h
@@ -0,0 +1,32 @@
+/*
+ * A generic stack depot implementation
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_STACKDEPOT_H
+#define _LINUX_STACKDEPOT_H
+
+typedef u32 depot_stack_handle_t;
+
+struct stack_trace;
+
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+
+#endif
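
A kernel-style usage sketch of the new depot API: capture a trace, intern it behind a 32-bit handle, and replay it later (this is how the reworked page_owner stores one handle per page instead of eight raw entries); the helper names are illustrative:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_current_stack(gfp_t flags)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 2,	/* drop the capture helpers */
	};

	save_stack_trace(&trace);
	return depot_save_stack(&trace, flags);	/* 0 on failure */
}

static void print_saved_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	depot_fetch_stack(handle, &trace);	/* points at depot storage */
	print_stack_trace(&trace, 0);
}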
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 25247220b4b7..f0f1793cfa49 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
*/
struct tk_read_base {
struct clocksource *clock;
- cycle_t (*read)(struct clocksource *cs);
cycle_t mask;
cycle_t cycle_last;
u32 mult;
diff --git a/include/linux/usb/usb_qdss.h b/include/linux/usb/usb_qdss.h
index e01e6781eb21..f2b8782528a8 100644
--- a/include/linux/usb/usb_qdss.h
+++ b/include/linux/usb/usb_qdss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,9 @@
#include <linux/kernel.h>
+#define USB_QDSS_CH_MDM "qdss_mdm"
+#define USB_QDSS_CH_MSM "qdss"
+
struct qdss_request {
char *buf;
int length;
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
new file mode 100644
index 000000000000..5409e1b15a25
--- /dev/null
+++ b/include/net/cnss2.h
@@ -0,0 +1,192 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NET_CNSS2_H
+#define _NET_CNSS2_H
+
+#include <linux/pci.h>
+
+#define CNSS_MAX_FILE_NAME 20
+#define CNSS_MAX_TIMESTAMP_LEN 32
+
+enum cnss_bus_width_type {
+ CNSS_BUS_WIDTH_NONE,
+ CNSS_BUS_WIDTH_LOW,
+ CNSS_BUS_WIDTH_MEDIUM,
+ CNSS_BUS_WIDTH_HIGH
+};
+
+enum cnss_platform_cap_flag {
+ CNSS_HAS_EXTERNAL_SWREG = 0x01,
+ CNSS_HAS_UART_ACCESS = 0x02,
+};
+
+struct cnss_platform_cap {
+ u32 cap_flag;
+};
+
+struct cnss_fw_files {
+ char image_file[CNSS_MAX_FILE_NAME];
+ char board_data[CNSS_MAX_FILE_NAME];
+ char otp_data[CNSS_MAX_FILE_NAME];
+ char utf_file[CNSS_MAX_FILE_NAME];
+ char utf_board_data[CNSS_MAX_FILE_NAME];
+ char epping_file[CNSS_MAX_FILE_NAME];
+ char evicted_data[CNSS_MAX_FILE_NAME];
+};
+
+struct cnss_soc_info {
+ void __iomem *va;
+ phys_addr_t pa;
+ uint32_t chip_id;
+ uint32_t chip_family;
+ uint32_t board_id;
+ uint32_t soc_id;
+ uint32_t fw_version;
+ char fw_build_timestamp[CNSS_MAX_TIMESTAMP_LEN + 1];
+};
+
+struct cnss_wlan_runtime_ops {
+ int (*runtime_suspend)(struct pci_dev *pdev);
+ int (*runtime_resume)(struct pci_dev *pdev);
+};
+
+struct cnss_wlan_driver {
+ char *name;
+ int (*probe)(struct pci_dev *pdev, const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *pdev);
+ int (*reinit)(struct pci_dev *pdev, const struct pci_device_id *id);
+ void (*shutdown)(struct pci_dev *pdev);
+ void (*crash_shutdown)(struct pci_dev *pdev);
+ int (*suspend)(struct pci_dev *pdev, pm_message_t state);
+ int (*resume)(struct pci_dev *pdev);
+ int (*suspend_noirq)(struct pci_dev *pdev);
+ int (*resume_noirq)(struct pci_dev *pdev);
+ void (*modem_status)(struct pci_dev *, int state);
+ void (*update_status)(struct pci_dev *pdev, uint32_t status);
+ struct cnss_wlan_runtime_ops *runtime_ops;
+ const struct pci_device_id *id_table;
+};
+
+enum cnss_driver_status {
+ CNSS_UNINITIALIZED,
+ CNSS_INITIALIZED,
+ CNSS_LOAD_UNLOAD,
+ CNSS_RECOVERY,
+};
+
+struct cnss_ce_tgt_pipe_cfg {
+ u32 pipe_num;
+ u32 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+ u32 reserved;
+};
+
+struct cnss_ce_svc_pipe_cfg {
+ u32 service_id;
+ u32 pipe_dir;
+ u32 pipe_num;
+};
+
+struct cnss_shadow_reg_cfg {
+ u16 ce_id;
+ u16 reg_offset;
+};
+
+struct cnss_shadow_reg_v2_cfg {
+ u32 addr;
+};
+
+struct cnss_wlan_enable_cfg {
+ u32 num_ce_tgt_cfg;
+ struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg;
+ u32 num_ce_svc_pipe_cfg;
+ struct cnss_ce_svc_pipe_cfg *ce_svc_cfg;
+ u32 num_shadow_reg_cfg;
+ struct cnss_shadow_reg_cfg *shadow_reg_cfg;
+ u32 num_shadow_reg_v2_cfg;
+ struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg;
+};
+
+enum cnss_driver_mode {
+ CNSS_MISSION,
+ CNSS_FTM,
+ CNSS_EPPING,
+ CNSS_WALTEST,
+ CNSS_OFF,
+ CNSS_CCPM,
+ CNSS_QVIT,
+ CNSS_CALIBRATION,
+};
+
+enum cnss_recovery_reason {
+ CNSS_REASON_DEFAULT,
+ CNSS_REASON_LINK_DOWN,
+ CNSS_REASON_RDDM,
+ CNSS_REASON_TIMEOUT,
+};
+
+extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver);
+extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver);
+extern void cnss_device_crashed(void);
+extern int cnss_pci_link_down(struct device *dev);
+extern void cnss_schedule_recovery(struct device *dev,
+ enum cnss_recovery_reason reason);
+extern int cnss_self_recovery(struct device *dev,
+ enum cnss_recovery_reason reason);
+extern int cnss_force_fw_assert(struct device *dev);
+extern void *cnss_get_virt_ramdump_mem(unsigned long *size);
+extern int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+ u32 target_type, u32 target_version);
+extern int cnss_get_platform_cap(struct cnss_platform_cap *cap);
+extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
+extern void cnss_set_driver_status(enum cnss_driver_status driver_status);
+extern int cnss_request_bus_bandwidth(int bandwidth);
+extern int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
+extern int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
+ u16 buf_len);
+extern int cnss_wlan_set_dfs_nol(const void *info, u16 info_len);
+extern int cnss_wlan_get_dfs_nol(void *info, u16 info_len);
+extern int cnss_power_up(struct device *dev);
+extern int cnss_power_down(struct device *dev);
+extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern void cnss_request_pm_qos(u32 qos_val);
+extern void cnss_remove_pm_qos(void);
+extern void cnss_lock_pm_sem(void);
+extern void cnss_release_pm_sem(void);
+extern int cnss_wlan_pm_control(bool vote);
+extern int cnss_auto_suspend(void);
+extern int cnss_auto_resume(void);
+extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
+ int *num_vectors,
+ uint32_t *user_base_data,
+ uint32_t *base_vector);
+extern int cnss_get_msi_irq(struct device *dev, unsigned int vector);
+extern void cnss_get_msi_address(struct device *dev, uint32_t *msi_addr_low,
+ uint32_t *msi_addr_high);
+extern int cnss_wlan_enable(struct device *dev,
+ struct cnss_wlan_enable_cfg *config,
+ enum cnss_driver_mode mode,
+ const char *host_version);
+extern int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode);
+extern unsigned int cnss_get_qmi_timeout(void);
+extern int cnss_athdiag_read(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *output);
+extern int cnss_athdiag_write(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *input);
+extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
+
+#endif /* _NET_CNSS2_H */
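
A hypothetical minimal consumer of the new cnss2 registration API, to show how the header above is intended to be used by a WLAN PCIe driver (the PCI IDs and names are illustrative, not real values):

#include <linux/module.h>
#include <linux/pci.h>
#include <net/cnss2.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;	/* bring the WLAN function up here */
}

static void demo_remove(struct pci_dev *pdev)
{
}

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x17cb, 0x1101) },	/* assumed IDs, illustration only */
	{ }
};

static struct cnss_wlan_driver demo_driver = {
	.name		= "demo_wlan",
	.probe		= demo_probe,
	.remove		= demo_remove,
	.id_table	= demo_ids,
};

static int __init demo_init(void)
{
	return cnss_wlan_register_driver(&demo_driver);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	cnss_wlan_unregister_driver(&demo_driver);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL v2");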
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d18cbafc3455..c81d806e415f 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -947,10 +947,6 @@ struct xfrm_dst {
struct flow_cache_object flo;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols, num_xfrms;
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct flowi *origin;
- struct xfrm_selector *partner;
-#endif
u32 xfrm_genid;
u32 policy_genid;
u32 route_mtu_cached;
@@ -966,12 +962,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
dst_release(xdst->route);
if (likely(xdst->u.dst.xfrm))
xfrm_state_put(xdst->u.dst.xfrm);
-#ifdef CONFIG_XFRM_SUB_POLICY
- kfree(xdst->origin);
- xdst->origin = NULL;
- kfree(xdst->partner);
- xdst->partner = NULL;
-#endif
}
#endif
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index 6497d962e347..e199978302bf 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -336,7 +336,7 @@ __packed struct qseecom_client_send_fsm_key_req {
__packed struct qseecom_continue_blocked_request_ireq {
uint32_t qsee_cmd_id;
- uint32_t app_id;
+ uint32_t app_or_session_id; /* legacy: app_id; smcinvoke: session_id */
};
@@ -681,6 +681,9 @@ __packed struct qseecom_continue_blocked_request_ireq {
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x07)
+
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index ad57eda97f9d..af389305207f 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -95,7 +95,7 @@ struct scm_desc {
u64 x5;
};
-#ifdef CONFIG_QCOM_SCM
+#if defined(CONFIG_QCOM_SCM) || defined(CONFIG_QCOM_SCM_QCPE)
extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len);
@@ -230,7 +230,7 @@ static inline int scm_io_write(phys_addr_t address, u32 val)
return 0;
}
-inline bool scm_is_secure_device(void)
+static inline bool scm_is_secure_device(void)
{
return false;
}
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 74995a0cdbad..2680a13721c2 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -446,6 +446,88 @@ struct adm_param_data_v5 {
*/
} __packed;
+
+struct param_data_v6 {
+ /* Unique ID of the module. */
+ u32 module_id;
+ /* Unique ID of the instance. */
+ u16 instance_id;
+ /* Reserved for future enhancements.
+ * This field must be set to zero.
+ */
+ u16 reserved;
+ /* Unique ID of the parameter. */
+ u32 param_id;
+ /* Data size of the param_id/module_id combination.
+ * This value is a
+ * multiple of 4 bytes.
+ */
+ u32 param_size;
+} __packed;
+
+/* ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command is used to set
+ * calibration data to the ADSP Matrix Mixer the payload is
+ * of struct adm_cmd_set_mtmx_params_v1.
+ *
+ * ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1 can be used to get
+ * the calibration data from the ADSP Matrix Mixer and
+ * ADM_CMDRSP_GET_MTMX_STRTR_DEV_PARAMS_V1 is the response
+ * ioctl to ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1.
+ */
+#define ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 0x00010367
+#define ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1 0x00010368
+#define ADM_CMDRSP_GET_MTMX_STRTR_DEV_PARAMS_V1 0x00010369
+
+/* Payload of the #define ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command.
+ * If the data_payload_addr_lsw and data_payload_addr_msw element
+ * are NULL, a series of struct param_data_v6 structures immediately
+ * follows, whose total size is payload_size bytes.
+ */
+struct adm_cmd_set_mtmx_params_v1 {
+ struct apr_hdr hdr;
+ /* LSW of parameter data payload address.*/
+ u32 payload_addr_lsw;
+
+ /* MSW of parameter data payload address.*/
+ u32 payload_addr_msw;
+
+ /* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS
+ * command.
+ * If mem_map_handle is zero it implies the message is in
+ * the payload
+ */
+ u32 mem_map_handle;
+
+ /* Size in bytes of the variable payload accompanying this
+ * message or in shared memory. This is used for parsing
+ * the parameter payload.
+ */
+ u32 payload_size;
+
+ /* COPP ID/Device ID */
+ u16 copp_id;
+
+ /* For alignment, must be set to 0 */
+ u16 reserved;
+} __packed;
+
+struct enable_param_v6 {
+ /*
+ * Specifies whether the Audio processing module is enabled.
+ * This parameter is generic/common parameter to configure or
+ * determine the state of any audio processing module.
+ */
+ struct param_data_v6 param;
+
+ /* @values 0 : Disable 1: Enable */
+ uint32_t enable;
+} __packed;
+
+/* Defined in ADSP as VOICE_MODULE_TX_STREAM_LIMITER but
+ * used for RX stream limiter on matrix input to ADM.
+ */
+#define ADM_MTMX_MODULE_STREAM_LIMITER 0x00010F15
+
#define ASM_STREAM_CMD_REGISTER_PP_EVENTS 0x00013213
#define ASM_STREAM_PP_EVENT 0x00013214
#define ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE 0x13333
@@ -625,6 +707,29 @@ struct audproc_softvolume_params {
*/
#define AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT 0x00010913
+/* ID of the Channel Mixer module, which is used to configure
+ * channel-mixer related parameters.
+ * This module supports the AUDPROC_CHMIXER_PARAM_ID_COEFF parameter ID.
+ */
+#define AUDPROC_MODULE_ID_CHMIXER 0x00010341
+
+/* ID of the Coefficient parameter used by AUDPROC_MODULE_ID_CHMIXER to
+ * configure the channel mixer weighting coefficients.
+ */
+#define AUDPROC_CHMIXER_PARAM_ID_COEFF 0x00010342
+
+/* Payload of the per-session, per-device parameter data of the
+ * #ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5 command or
+ * #ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V6 command.
+ * Immediately following this structure are param_size bytes of parameter
+ * data. The structure and size depend on the module_id/param_id pair.
+ */
+struct adm_pspd_param_data_t {
+ uint32_t module_id;
+ uint32_t param_id;
+ uint16_t param_size;
+ uint16_t reserved;
+} __packed;
struct audproc_mfc_output_media_fmt {
struct adm_cmd_set_pp_params_v5 params;
@@ -3902,6 +4007,14 @@ struct asm_softvolume_params {
u32 rampingcurve;
} __packed;
+struct asm_stream_pan_ctrl_params {
+ uint16_t num_output_channels;
+ uint16_t num_input_channels;
+ uint16_t output_channel_map[8];
+ uint16_t input_channel_map[8];
+ uint16_t gain[64];
+} __packed;
+
#define ASM_END_POINT_DEVICE_MATRIX 0
#define PCM_CHANNEL_NULL 0
@@ -7802,6 +7915,48 @@ struct asm_volume_ctrl_lr_chan_gain {
/*< Linear gain in Q13 format for the right channel.*/
} __packed;
+struct audproc_chmixer_param_coeff {
+ uint32_t index;
+ uint16_t num_output_channels;
+ uint16_t num_input_channels;
+} __packed;
+
+
+/* ID of the Multichannel Volume Control parameters used by
+ * AUDPROC_MODULE_ID_VOL_CTRL.
+ */
+#define AUDPROC_PARAM_ID_MULTICHANNEL_GAIN 0x00010713
+
+/* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_GAIN channel type/gain
+ * pairs used by the Volume Control module.
+ * This structure immediately follows the
+ * audproc_volume_ctrl_multichannel_gain_t structure.
+ */
+struct audproc_volume_ctrl_channel_type_gain_pair {
+ uint8_t channel_type;
+ /* Channel type for which the gain setting is to be applied. */
+
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t reserved3;
+
+ uint32_t gain;
+ /* Gain value for this channel in Q28 format. */
+} __packed;
+
+/* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_MUTE parameters used by
+ * the Volume Control module.
+ */
+struct audproc_volume_ctrl_multichannel_gain {
+ uint32_t num_channels;
+ /* Number of channels for which mute configuration is provided. Any
+ * channels present in the data for which mute configuration is not
+ * provided are set to unmute.
+ */
+
+ struct audproc_volume_ctrl_channel_type_gain_pair *gain_data;
+ /* Array of channel type/mute setting pairs. */
+} __packed;
/* Structure for the mute configuration parameter for a
volume control module. */
@@ -9151,7 +9306,6 @@ struct srs_trumedia_params {
} __packed;
/* SRS TruMedia end */
-#define AUDPROC_PARAM_ID_ENABLE 0x00010904
#define ASM_STREAM_POSTPROC_TOPO_ID_SA_PLUS 0x1000FFFF
/* DTS Eagle */
#define AUDPROC_MODULE_ID_DTS_HPX_PREMIX 0x0001077C
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index e689e9357012..65c42ee18914 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -51,6 +51,13 @@ enum {
ADM_CLIENT_ID_MAX,
};
+/* ENUM for adm_status & route_status */
+enum adm_status_flags {
+ ADM_STATUS_CALIBRATION_REQUIRED = 0,
+ ADM_STATUS_LIMITER,
+ ADM_STATUS_MAX,
+};
+
#define MAX_COPPS_PER_PORT 0x8
#define ADM_MAX_CHANNELS 8
@@ -61,6 +68,7 @@ struct route_payload {
int app_type[MAX_COPPS_PER_PORT];
int acdb_dev_id[MAX_COPPS_PER_PORT];
int sample_rate[MAX_COPPS_PER_PORT];
+ unsigned long route_status[MAX_COPPS_PER_PORT];
unsigned short num_copps;
unsigned int session_id;
};
@@ -138,9 +146,13 @@ int adm_get_topology_for_port_copp_idx(int port_id, int copp_idx);
int adm_get_indexes_from_copp_id(int copp_id, int *port_idx, int *copp_idx);
-int adm_set_stereo_to_custom_stereo(int port_id, int copp_idx,
- unsigned int session_id,
- char *params, uint32_t params_length);
+int adm_set_pspd_matrix_params(int port_id, int copp_idx,
+ unsigned int session_id,
+ char *params, uint32_t params_length);
+
+int adm_set_downmix_params(int port_id, int copp_idx,
+ unsigned int session_id, char *params,
+ uint32_t params_length);
int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length,
char *params);
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index d0dffbd15923..177c2f4da32e 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -611,6 +611,14 @@ int q6asm_set_softvolume(struct audio_client *ac,
int q6asm_set_softvolume_v2(struct audio_client *ac,
struct asm_softvolume_params *param, int instance);
+/* Set panning and MFC params */
+int q6asm_set_mfc_panning_params(struct audio_client *ac,
+ struct asm_stream_pan_ctrl_params *pan_param);
+
+/* Set vol gain pair */
+int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac,
+ struct asm_stream_pan_ctrl_params *pan_param);
+
/* Send left-right channel gain */
int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain);
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 927c3626edb7..8b51873e7b08 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -84,6 +84,21 @@ struct drm_msm_ext_panel_hdr_metadata {
};
/**
+ * HDR Control
+ * This encapsulates the HDR metadata as well as a state control
+ * for the HDR metadata as required by the HDMI spec to send the
+ * relevant metadata depending on the state of the HDR playback.
+ * hdr_state: Controls HDR state, takes values ENABLE(1)/DISABLE(0)
+ * hdr_meta: Metadata sent by the userspace for the HDR clip
+ */
+
+#define DRM_MSM_EXT_PANEL_HDR_CTRL
+struct drm_msm_ext_panel_hdr_ctrl {
+ __u8 hdr_state; /* HDR state */
+ struct drm_msm_ext_panel_hdr_metadata hdr_meta; /* HDR metadata */
+};
+
+/**
* HDR sink properties
* These are defined as per EDID spec and shall be used by the userspace
* to determine the HDR properties to be set to the sink.
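A brief, hypothetical userspace sketch of filling the new HDR control blob
before handing it to the driver; the helper name is an assumption, only the
struct layout and the ENABLE(1)/DISABLE(0) convention come from this header.

static void example_fill_hdr_ctrl(struct drm_msm_ext_panel_hdr_ctrl *ctrl,
		const struct drm_msm_ext_panel_hdr_metadata *meta)
{
	ctrl->hdr_state = 1;	/* ENABLE, per the comment above */
	ctrl->hdr_meta = *meta;	/* metadata supplied for the HDR clip */
}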
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index cbd6731aff43..4d0b992d0ba6 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -16,61 +16,82 @@
#define IPA_IOC_MAGIC 0xCF
/**
+ * IPA device full path
+ */
+#define IPA_DEV_NAME "/dev/ipa"
+
+/**
+ * IPA NAT table character device name
+ */
+#define IPA_NAT_DEV_NAME "ipaNatTable"
+
+/**
+ * IPA IPv6CT table character device name
+ */
+#define IPA_IPV6CT_DEV_NAME "ipaIpv6CTTable"
+
+/**
* name of the default routing tables for v4 and v6
*/
#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
/**
- * the commands supported by IPA driver
- */
-#define IPA_IOCTL_ADD_HDR 0
-#define IPA_IOCTL_DEL_HDR 1
-#define IPA_IOCTL_ADD_RT_RULE 2
-#define IPA_IOCTL_DEL_RT_RULE 3
-#define IPA_IOCTL_ADD_FLT_RULE 4
-#define IPA_IOCTL_DEL_FLT_RULE 5
-#define IPA_IOCTL_COMMIT_HDR 6
-#define IPA_IOCTL_RESET_HDR 7
-#define IPA_IOCTL_COMMIT_RT 8
-#define IPA_IOCTL_RESET_RT 9
-#define IPA_IOCTL_COMMIT_FLT 10
-#define IPA_IOCTL_RESET_FLT 11
-#define IPA_IOCTL_DUMP 12
-#define IPA_IOCTL_GET_RT_TBL 13
-#define IPA_IOCTL_PUT_RT_TBL 14
-#define IPA_IOCTL_COPY_HDR 15
-#define IPA_IOCTL_QUERY_INTF 16
-#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
-#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
-#define IPA_IOCTL_GET_HDR 19
-#define IPA_IOCTL_PUT_HDR 20
-#define IPA_IOCTL_SET_FLT 21
-#define IPA_IOCTL_ALLOC_NAT_MEM 22
-#define IPA_IOCTL_V4_INIT_NAT 23
-#define IPA_IOCTL_NAT_DMA 24
-#define IPA_IOCTL_V4_DEL_NAT 26
-#define IPA_IOCTL_PULL_MSG 27
-#define IPA_IOCTL_GET_NAT_OFFSET 28
-#define IPA_IOCTL_RM_ADD_DEPENDENCY 29
-#define IPA_IOCTL_RM_DEL_DEPENDENCY 30
-#define IPA_IOCTL_GENERATE_FLT_EQ 31
-#define IPA_IOCTL_QUERY_INTF_EXT_PROPS 32
-#define IPA_IOCTL_QUERY_EP_MAPPING 33
-#define IPA_IOCTL_QUERY_RT_TBL_INDEX 34
-#define IPA_IOCTL_WRITE_QMAPID 35
-#define IPA_IOCTL_MDFY_FLT_RULE 36
-#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37
-#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38
-#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED 39
-#define IPA_IOCTL_ADD_HDR_PROC_CTX 40
-#define IPA_IOCTL_DEL_HDR_PROC_CTX 41
-#define IPA_IOCTL_MDFY_RT_RULE 42
-#define IPA_IOCTL_ADD_RT_RULE_AFTER 43
-#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
-#define IPA_IOCTL_GET_HW_VERSION 45
-#define IPA_IOCTL_ADD_RT_RULE_EXT 46
-#define IPA_IOCTL_NAT_MODIFY_PDN 47
-#define IPA_IOCTL_MAX 48
+ * commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR 0
+#define IPA_IOCTL_DEL_HDR 1
+#define IPA_IOCTL_ADD_RT_RULE 2
+#define IPA_IOCTL_DEL_RT_RULE 3
+#define IPA_IOCTL_ADD_FLT_RULE 4
+#define IPA_IOCTL_DEL_FLT_RULE 5
+#define IPA_IOCTL_COMMIT_HDR 6
+#define IPA_IOCTL_RESET_HDR 7
+#define IPA_IOCTL_COMMIT_RT 8
+#define IPA_IOCTL_RESET_RT 9
+#define IPA_IOCTL_COMMIT_FLT 10
+#define IPA_IOCTL_RESET_FLT 11
+#define IPA_IOCTL_DUMP 12
+#define IPA_IOCTL_GET_RT_TBL 13
+#define IPA_IOCTL_PUT_RT_TBL 14
+#define IPA_IOCTL_COPY_HDR 15
+#define IPA_IOCTL_QUERY_INTF 16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR 19
+#define IPA_IOCTL_PUT_HDR 20
+#define IPA_IOCTL_SET_FLT 21
+#define IPA_IOCTL_ALLOC_NAT_MEM 22
+#define IPA_IOCTL_V4_INIT_NAT 23
+#define IPA_IOCTL_TABLE_DMA_CMD 24
+#define IPA_IOCTL_NAT_DMA IPA_IOCTL_TABLE_DMA_CMD
+#define IPA_IOCTL_INIT_IPV6CT_TABLE 25
+#define IPA_IOCTL_V4_DEL_NAT 26
+#define IPA_IOCTL_PULL_MSG 27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_RM_ADD_DEPENDENCY 29
+#define IPA_IOCTL_RM_DEL_DEPENDENCY 30
+#define IPA_IOCTL_GENERATE_FLT_EQ 31
+#define IPA_IOCTL_QUERY_INTF_EXT_PROPS 32
+#define IPA_IOCTL_QUERY_EP_MAPPING 33
+#define IPA_IOCTL_QUERY_RT_TBL_INDEX 34
+#define IPA_IOCTL_WRITE_QMAPID 35
+#define IPA_IOCTL_MDFY_FLT_RULE 36
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38
+#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED 39
+#define IPA_IOCTL_ADD_HDR_PROC_CTX 40
+#define IPA_IOCTL_DEL_HDR_PROC_CTX 41
+#define IPA_IOCTL_MDFY_RT_RULE 42
+#define IPA_IOCTL_ADD_RT_RULE_AFTER 43
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
+#define IPA_IOCTL_GET_HW_VERSION 45
+#define IPA_IOCTL_ADD_RT_RULE_EXT 46
+#define IPA_IOCTL_NAT_MODIFY_PDN 47
+#define IPA_IOCTL_ALLOC_NAT_TABLE 48
+#define IPA_IOCTL_ALLOC_IPV6CT_TABLE 49
+#define IPA_IOCTL_DEL_NAT_TABLE 50
+#define IPA_IOCTL_DEL_IPV6CT_TABLE 51
+#define IPA_IOCTL_MAX 52
/**
* max size of the header to be inserted
@@ -1314,15 +1335,26 @@ struct ipa_ioc_nat_alloc_mem {
};
/**
- * struct ipa_ioc_v4_nat_init - nat table initialization
- * parameters
+ * struct ipa_ioc_nat_ipv6ct_table_alloc - NAT/IPv6CT table memory allocation
+ * properties
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_ipv6ct_table_alloc {
+ size_t size;
+ off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization parameters
* @tbl_index: input parameter, index of the table
* @ipv4_rules_offset: input parameter, ipv4 rules address offset
* @expn_rules_offset: input parameter, ipv4 expansion rules address offset
* @index_offset: input parameter, index rules offset
* @index_expn_offset: input parameter, index expansion rules offset
- * @table_entries: input parameter, ipv4 rules table size in entries
- * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @table_entries: input parameter, ipv4 rules table number of entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table number of
+ * entries
* @ip_addr: input parameter, public ip address
*/
struct ipa_ioc_v4_nat_init {
@@ -1339,6 +1371,23 @@ struct ipa_ioc_v4_nat_init {
};
/**
+ * struct ipa_ioc_ipv6ct_init - IPv6CT table initialization parameters
+ * @tbl_index: input parameter, index of the table
+ * @base_table_offset: input parameter, IPv6CT base table address offset
+ * @expn_table_offset: input parameter, IPv6CT expansion table address offset
+ * @table_entries: input parameter, IPv6CT table number of entries
+ * @expn_table_entries: input parameter, IPv6CT expansion table number of
+ * entries
+ */
+struct ipa_ioc_ipv6ct_init {
+ uint8_t tbl_index;
+ uint32_t base_table_offset;
+ uint32_t expn_table_offset;
+ uint16_t table_entries;
+ uint16_t expn_table_entries;
+};
+
+/**
* struct ipa_ioc_v4_nat_del - nat table delete parameter
* @table_index: input parameter, index of the table
* @public_ip_addr: input parameter, public ip address
@@ -1349,7 +1398,15 @@ struct ipa_ioc_v4_nat_del {
};
/**
- * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * struct ipa_ioc_nat_ipv6ct_table_del - NAT/IPv6CT table delete parameter
+ * @table_index: input parameter, index of the table
+ */
+struct ipa_ioc_nat_ipv6ct_table_del {
+ uint8_t table_index;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat/ipv6ct dma command parameter
* @table_index: input parameter, index of the table
* @base_addr: type of table, from which the base address of the table
* can be inferred
@@ -1366,7 +1423,7 @@ struct ipa_ioc_nat_dma_one {
};
/**
- * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat/ipv6ct dma commands
* @entries: number of dma commands in use
* @dma: data pointer to the dma commands
*/
@@ -1377,12 +1434,12 @@ struct ipa_ioc_nat_dma_cmd {
};
/**
-* struct ipa_ioc_nat_pdn_entry - PDN entry modification data
-* @pdn_index: index of the entry in the PDN config table to be changed
-* @public_ip: PDN's public ip
-* @src_metadata: PDN's source NAT metadata for metadata replacement
-* @dst_metadata: PDN's destination NAT metadata for metadata replacement
-*/
+ * struct ipa_ioc_nat_pdn_entry - PDN entry modification data
+ * @pdn_index: index of the entry in the PDN config table to be changed
+ * @public_ip: PDN's public ip
+ * @src_metadata: PDN's source NAT metadata for metadata replacement
+ * @dst_metadata: PDN's destination NAT metadata for metadata replacement
+ */
struct ipa_ioc_nat_pdn_entry {
uint8_t pdn_index;
uint32_t public_ip;
@@ -1600,15 +1657,33 @@ enum ipacm_client_enum {
#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ALLOC_NAT_MEM, \
struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_ALLOC_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_alloc *)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_alloc *)
#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_V4_INIT_NAT, \
struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_INIT_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_INIT_IPV6CT_TABLE, \
+ struct ipa_ioc_ipv6ct_init *)
#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_NAT_DMA, \
struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_TABLE_DMA_CMD _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_TABLE_DMA_CMD, \
+ struct ipa_ioc_nat_dma_cmd *)
#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_V4_DEL_NAT, \
struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_DEL_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_NAT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_del *)
+#define IPA_IOC_DEL_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_IPV6CT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_del *)
#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_GET_NAT_OFFSET, \
uint32_t *)
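A hedged userspace sketch of the new IPv6CT flow implied by the ioctls above:
allocate table memory, then initialize it. The per-entry size and the offset
arithmetic are placeholders, not the driver's real layout; only the ioctl
names, the device path and the two structures come from this header.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

static int example_ipv6ct_setup(uint16_t entries, uint16_t expn_entries)
{
	struct ipa_ioc_nat_ipv6ct_table_alloc alloc = { 0 };
	struct ipa_ioc_ipv6ct_init init = { 0 };
	int fd = open(IPA_DEV_NAME, O_RDWR);

	if (fd < 0)
		return -1;

	/* Placeholder sizing; the real per-entry size is driver-defined. */
	alloc.size = (size_t)(entries + expn_entries) * 32;
	if (ioctl(fd, IPA_IOC_ALLOC_IPV6CT_TABLE, &alloc))
		goto fail;

	init.tbl_index = 0;
	init.base_table_offset = alloc.offset;
	init.expn_table_offset = alloc.offset + (uint32_t)entries * 32;
	init.table_entries = entries;
	init.expn_table_entries = expn_entries;
	if (ioctl(fd, IPA_IOC_INIT_IPV6CT_TABLE, &init))
		goto fail;

	return fd;

fail:
	close(fd);
	return -1;
}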
diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h
index 83927c614e91..a92c144f712e 100644
--- a/include/uapi/media/msm_camsensor_sdk.h
+++ b/include/uapi/media/msm_camsensor_sdk.h
@@ -113,8 +113,10 @@ enum msm_sensor_power_seq_gpio_t {
SENSOR_GPIO_FL_RESET,
SENSOR_GPIO_CUSTOM1,
SENSOR_GPIO_CUSTOM2,
+ SENSOR_GPIO_CUSTOM3,
SENSOR_GPIO_MAX,
};
+#define SENSOR_GPIO_CUSTOM3 SENSOR_GPIO_CUSTOM3
enum msm_ir_cut_filter_gpio_t {
IR_CUT_FILTER_GPIO_P = 0,
diff --git a/include/uapi/sound/devdep_params.h b/include/uapi/sound/devdep_params.h
index 5061ec0da356..9e3133b76c68 100644
--- a/include/uapi/sound/devdep_params.h
+++ b/include/uapi/sound/devdep_params.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,4 +66,14 @@ struct dts_eagle_param_desc {
uint32_t device;
} __packed;
+#define HWDEP_FE_BASE	3000	/* unique base for FE hw dep nodes */
+struct snd_pcm_mmap_fd {
+ int32_t dir;
+ int32_t fd;
+ int32_t size;
+ int32_t actual_size;
+};
+
+#define SNDRV_PCM_IOCTL_MMAP_DATA_FD _IOWR('U', 0xd2, struct snd_pcm_mmap_fd)
+
#endif
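A speculative userspace sketch of the new hwdep ioctl: fetch the dma-buf/ION
fd that backs a PCM stream's data buffer. The hwdep device path is an
assumption; only the ioctl and struct snd_pcm_mmap_fd come from this header.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/devdep_params.h>

static int example_get_mmap_fd(const char *hwdep_path, int dir)
{
	struct snd_pcm_mmap_fd mfd = { .dir = dir };
	int hw = open(hwdep_path, O_RDWR);	/* e.g. a /dev/snd/hwC0D* node (assumed) */

	if (hw < 0)
		return -1;
	if (ioctl(hw, SNDRV_PCM_IOCTL_MMAP_DATA_FD, &mfd) < 0) {
		close(hw);
		return -1;
	}
	printf("buffer fd %d, size %d (actual %d)\n",
	       mfd.fd, mfd.size, mfd.actual_size);
	close(hw);
	return mfd.fd;
}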
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1a26ef5b7d58..1d6b0a209bc0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -375,6 +375,21 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
goto out_release;
}
+ /*
+ * By now we've cleared cpu_active_mask, wait for all preempt-disabled
+ * and RCU users of this state to go away such that all new such users
+ * will observe it.
+ *
+ * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+ * not imply sync_sched(), so wait for both.
+ *
+	 * Do the sync before parking smpboot threads to handle the RCU boost case.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT))
+ synchronize_rcu_mult(call_rcu, call_rcu_sched);
+ else
+ synchronize_rcu();
+
smpboot_park_threads(cpu);
/*
@@ -508,8 +523,8 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
if (ret) {
nr_calls--;
- pr_warn("%s: attempt to bring up CPU %u failed\n",
- __func__, cpu);
+ pr_warn_ratelimited("%s: attempt to bring up CPU %u failed\n",
+ __func__, cpu);
goto out_notify;
}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a24c5b909047..b05509af0352 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -114,6 +114,11 @@ static ssize_t write_irq_affinity(int type, struct file *file,
goto free_cpumask;
}
+ if (cpumask_subset(new_value, cpu_isolated_mask)) {
+ err = -EINVAL;
+ goto free_cpumask;
+ }
+
/*
* Do not allow disabling IRQs completely - it's a too easy
* way to make the system unusable accidentally :-) At least
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a4d4de05b2d1..75c950ede9c7 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -511,6 +511,41 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
unsigned long flags;
/*
+ * If a spinner is present, there is a chance that the load of
+ * rwsem_has_spinner() in rwsem_wake() can be reordered with
+ * respect to decrement of rwsem count in __up_write() leading
+ * to wakeup being missed.
+ *
+ * spinning writer up_write caller
+ * --------------- -----------------------
+ * [S] osq_unlock() [L] osq
+ * spin_lock(wait_lock)
+ * sem->count=0xFFFFFFFF00000001
+ * +0xFFFFFFFF00000000
+ * count=sem->count
+ * MB
+ * sem->count=0xFFFFFFFE00000001
+ * -0xFFFFFFFF00000001
+ * RMB
+ * spin_trylock(wait_lock)
+ * return
+ * rwsem_try_write_lock(count)
+ * spin_unlock(wait_lock)
+ * schedule()
+ *
+	 * Reordering of atomic_long_sub_return_release() in __up_write()
+	 * and rwsem_has_spinner() in rwsem_wake() can cause a missed
+	 * wakeup in the up_write() context. In the spinning writer both
+	 * sem->count and the local variable count are 0xFFFFFFFE00000001,
+	 * so rwsem_try_write_lock() fails to acquire the rwsem and the
+	 * spinning writer goes to sleep in rwsem_down_write_failed().
+ *
+ * The smp_rmb() here is to make sure that the spinner state is
+ * consulted after sem->count is updated in up_write context.
+ */
+ smp_rmb();
+
+ /*
* If a spinner is present, it is not necessary to do the wakeup.
* Try to do wakeup only if the trylock succeeds to minimize
* spinlock contention which may introduce too much delay in the
diff --git a/kernel/panic.c b/kernel/panic.c
index 982a52352cfc..679254405510 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -172,7 +172,7 @@ void panic(const char *fmt, ...)
* Delay timeout seconds before rebooting the machine.
* We can't use the "normal" timers since we just panicked.
*/
- pr_emerg("Rebooting in %d seconds..", panic_timeout);
+ pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
touch_nmi_watchdog();
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index b0b93fd33af9..f8e8d68ed3fd 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -201,8 +201,9 @@ void calc_load_exit_idle(void)
struct rq *this_rq = this_rq();
/*
- * If we're still before the sample window, we're done.
+ * If we're still before the pending sample window, we're done.
*/
+ this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update))
return;
@@ -211,7 +212,6 @@ void calc_load_exit_idle(void)
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
- this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
diff --git a/kernel/signal.c b/kernel/signal.c
index f3f1f7a972fd..b92a047ddc82 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -503,7 +503,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
return !tsk->ptrace;
}
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+ bool *resched_timer)
{
struct sigqueue *q, *first = NULL;
@@ -525,6 +526,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
+
+ *resched_timer =
+ (first->flags & SIGQUEUE_PREALLOC) &&
+ (info->si_code == SI_TIMER) &&
+ (info->si_sys_private);
+
__sigqueue_free(first);
} else {
/*
@@ -541,12 +548,12 @@ still_pending:
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
- siginfo_t *info)
+ siginfo_t *info, bool *resched_timer)
{
int sig = next_signal(pending, mask);
if (sig)
- collect_signal(sig, pending, info);
+ collect_signal(sig, pending, info, resched_timer);
return sig;
}
@@ -558,15 +565,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
*/
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
+ bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
- signr = __dequeue_signal(&tsk->pending, mask, info);
+ signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
- mask, info);
+ mask, info, &resched_timer);
/*
* itimer signal ?
*
@@ -611,7 +619,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
- if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+ if (resched_timer) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5fa544f3f560..738f3467d169 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+ struct clocksource *clock = READ_ONCE(tkr->clock);
+
+ return clock->read(clock);
+}
+
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
@@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
*/
do {
seq = read_seqcount_begin(&tk_core.seq);
- now = tkr->read(tkr->clock);
+ now = tk_clock_read(tkr);
last = tkr->cycle_last;
mask = tkr->mask;
max = tkr->clock->max_cycles;
@@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
cycle_t cycle_now, delta;
/* read clocksource */
- cycle_now = tkr->read(tkr->clock);
+ cycle_now = tk_clock_read(tkr);
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -235,12 +255,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
old_clock = tk->tkr_mono.clock;
tk->tkr_mono.clock = clock;
- tk->tkr_mono.read = clock->read;
tk->tkr_mono.mask = clock->mask;
- tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+ tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk->tkr_raw.clock = clock;
- tk->tkr_raw.read = clock->read;
tk->tkr_raw.mask = clock->mask;
tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
- tkr->read(tkr->clock),
+ tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
return cycles_at_suspend;
}
+static struct clocksource dummy_clock = {
+ .read = dummy_clock_read,
+};
+
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- cycles_at_suspend = tkr->read(tkr->clock);
- tkr_dummy.read = dummy_clock_read;
+ cycles_at_suspend = tk_clock_read(tkr);
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- tkr_dummy.read = dummy_clock_read;
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
@@ -647,11 +669,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
- struct clocksource *clock = tk->tkr_mono.clock;
cycle_t cycle_now, delta;
s64 nsec;
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
@@ -1434,7 +1455,7 @@ void timekeeping_resume(void)
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
cycle_now > tk->tkr_mono.cycle_last) {
u64 num, max = ULLONG_MAX;
@@ -1829,7 +1850,7 @@ void update_wall_time(void)
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
- offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif
diff --git a/lib/Kconfig b/lib/Kconfig
index 7f6feb7a4687..4bb61d2e6746 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -547,4 +547,8 @@ config QMI_ENCDEC_DEBUG
library. This will log the information regarding the element
and message being encoded & decoded.
+config STACKDEPOT
+ bool
+ select STACKTRACE
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 20b74735508d..6da96c4f98a5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -244,6 +244,7 @@ config PAGE_OWNER
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select DEBUG_FS
select STACKTRACE
+ select STACKDEPOT
select PAGE_EXTENSION
help
This keeps track of what call chain is the owner of a page, may
diff --git a/lib/Makefile b/lib/Makefile
index 03a447a234cc..3ea641eb91bc 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -167,6 +167,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
+obj-$(CONFIG_STACKDEPOT) += stackdepot.o
+
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf73c2ec..79069d7938ea 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -22,14 +22,14 @@
* the values[M, M+1, ..., N] into the ints array in get_options.
*/
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
{
int x, inc_counter, upper_range;
(*str)++;
upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint;
- for (x = *pint; x < upper_range; x++)
+ for (x = *pint; n && x < upper_range; x++, n--)
*pint++ = x;
return inc_counter;
}
@@ -96,7 +96,7 @@ char *get_options(const char *str, int nints, int *ints)
break;
if (res == 3) {
int range_nums;
- range_nums = get_range((char **)&str, ints + i);
+ range_nums = get_range((char **)&str, ints + i, nints - i);
if (range_nums < 0)
break;
/*
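For context, a small sketch of the call pattern the new bound protects:
get_options() expands a range such as "1-100" into the ints array, and with
the extra argument a caller passing a short array can no longer be overrun.
The array size and message below are illustrative.

#include <linux/kernel.h>

static void example_parse_list(const char *arg)
{
	int ints[5];	/* ints[0] holds the count, ints[1..4] the values */

	get_options(arg, ARRAY_SIZE(ints), ints);
	pr_info("parsed %d value(s), first=%d\n",
		ints[0], ints[0] ? ints[1] : -1);
}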
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
new file mode 100644
index 000000000000..192134b225ca
--- /dev/null
+++ b/lib/stackdepot.c
@@ -0,0 +1,284 @@
+/*
+ * Generic stack depot for storing stack traces.
+ *
+ * Some debugging tools need to save stack traces of certain events which can
+ * be later presented to the user. For example, KASAN needs to save alloc and
+ * free stacks for each object, but storing two stack traces per object
+ * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
+ * that).
+ *
+ * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
+ * and free stacks repeat a lot, we save about 100x space.
+ * Stacks are never removed from the depot, so we store them contiguously one
+ * after another in a contiguous block of memory.
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
+#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
+#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
+#define STACK_ALLOC_ALIGN 4
+#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
+ STACK_ALLOC_ALIGN)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+ STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_MAX_SLABS \
+ (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
+ (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
+
+/* The compact structure to store the reference to stacks. */
+union handle_parts {
+ depot_stack_handle_t handle;
+ struct {
+ u32 slabindex : STACK_ALLOC_INDEX_BITS;
+ u32 offset : STACK_ALLOC_OFFSET_BITS;
+ u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
+ };
+};
+
+struct stack_record {
+ struct stack_record *next; /* Link in the hashtable */
+	u32 hash;			/* Hash in the hashtable */
+ u32 size; /* Number of frames in the stack */
+ union handle_parts handle;
+ unsigned long entries[1]; /* Variable-sized array of entries. */
+};
+
+static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+
+static int depot_index;
+static int next_slab_inited;
+static size_t depot_offset;
+static DEFINE_SPINLOCK(depot_lock);
+
+static bool init_stack_slab(void **prealloc)
+{
+ if (!*prealloc)
+ return false;
+ /*
+ * This smp_load_acquire() pairs with smp_store_release() to
+ * |next_slab_inited| below and in depot_alloc_stack().
+ */
+ if (smp_load_acquire(&next_slab_inited))
+ return true;
+ if (stack_slabs[depot_index] == NULL) {
+ stack_slabs[depot_index] = *prealloc;
+ } else {
+ stack_slabs[depot_index + 1] = *prealloc;
+ /*
+ * This smp_store_release pairs with smp_load_acquire() from
+ * |next_slab_inited| above and in depot_save_stack().
+ */
+ smp_store_release(&next_slab_inited, 1);
+ }
+ *prealloc = NULL;
+ return true;
+}
+
+/* Allocation of a new stack in raw storage */
+static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
+ u32 hash, void **prealloc, gfp_t alloc_flags)
+{
+ int required_size = offsetof(struct stack_record, entries) +
+ sizeof(unsigned long) * size;
+ struct stack_record *stack;
+
+ required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+
+ if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
+ if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return NULL;
+ }
+ depot_index++;
+ depot_offset = 0;
+ /*
+ * smp_store_release() here pairs with smp_load_acquire() from
+ * |next_slab_inited| in depot_save_stack() and
+ * init_stack_slab().
+ */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
+ smp_store_release(&next_slab_inited, 0);
+ }
+ init_stack_slab(prealloc);
+ if (stack_slabs[depot_index] == NULL)
+ return NULL;
+
+ stack = stack_slabs[depot_index] + depot_offset;
+
+ stack->hash = hash;
+ stack->size = size;
+ stack->handle.slabindex = depot_index;
+ stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.valid = 1;
+ memcpy(stack->entries, entries, size * sizeof(unsigned long));
+ depot_offset += required_size;
+
+ return stack;
+}
+
+#define STACK_HASH_ORDER 18
+#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
+#define STACK_HASH_SEED 0x9747b28c
+
+static struct stack_record *stack_table[STACK_HASH_SIZE] = {
+ [0 ... STACK_HASH_SIZE - 1] = NULL
+};
+
+/* Calculate hash for a stack */
+static inline u32 hash_stack(unsigned long *entries, unsigned int size)
+{
+ return jhash2((u32 *)entries,
+ size * sizeof(unsigned long) / sizeof(u32),
+ STACK_HASH_SEED);
+}
+
+/* Find a stack that is equal to the one stored in entries in the hash */
+static inline struct stack_record *find_stack(struct stack_record *bucket,
+ unsigned long *entries, int size,
+ u32 hash)
+{
+ struct stack_record *found;
+
+ for (found = bucket; found; found = found->next) {
+ if (found->hash == hash &&
+ found->size == size &&
+ !memcmp(entries, found->entries,
+ size * sizeof(unsigned long))) {
+ return found;
+ }
+ }
+ return NULL;
+}
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+{
+ union handle_parts parts = { .handle = handle };
+ void *slab = stack_slabs[parts.slabindex];
+ size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+ struct stack_record *stack = slab + offset;
+
+ trace->nr_entries = trace->max_entries = stack->size;
+ trace->entries = stack->entries;
+ trace->skip = 0;
+}
+
+/**
+ * depot_save_stack - save stack in a stack depot.
+ * @trace - the stacktrace to save.
+ * @alloc_flags - flags for allocating additional memory if required.
+ *
+ * Returns the handle of the stack struct stored in depot.
+ */
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
+ gfp_t alloc_flags)
+{
+ u32 hash;
+ depot_stack_handle_t retval = 0;
+ struct stack_record *found = NULL, **bucket;
+ unsigned long flags;
+ struct page *page = NULL;
+ void *prealloc = NULL;
+
+ if (unlikely(trace->nr_entries == 0))
+ goto fast_exit;
+
+ hash = hash_stack(trace->entries, trace->nr_entries);
+ bucket = &stack_table[hash & STACK_HASH_MASK];
+
+ /*
+ * Fast path: look the stack trace up without locking.
+ * The smp_load_acquire() here pairs with smp_store_release() to
+ * |bucket| below.
+ */
+ found = find_stack(smp_load_acquire(bucket), trace->entries,
+ trace->nr_entries, hash);
+ if (found)
+ goto exit;
+
+ /*
+ * Check if the current or the next stack slab need to be initialized.
+ * If so, allocate the memory - we won't be able to do that under the
+ * lock.
+ *
+ * The smp_load_acquire() here pairs with smp_store_release() to
+ * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+ */
+ if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+ /*
+ * Zero out zone modifiers, as we don't have specific zone
+ * requirements. Keep the flags related to allocation in atomic
+ * contexts and I/O.
+ */
+ alloc_flags &= ~GFP_ZONEMASK;
+ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+ page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ if (page)
+ prealloc = page_address(page);
+ }
+
+ spin_lock_irqsave(&depot_lock, flags);
+
+ found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+ if (!found) {
+ struct stack_record *new =
+ depot_alloc_stack(trace->entries, trace->nr_entries,
+ hash, &prealloc, alloc_flags);
+ if (new) {
+ new->next = *bucket;
+ /*
+ * This smp_store_release() pairs with
+ * smp_load_acquire() from |bucket| above.
+ */
+ smp_store_release(bucket, new);
+ found = new;
+ }
+ } else if (prealloc) {
+ /*
+ * We didn't need to store this stack trace, but let's keep
+ * the preallocated memory for the future.
+ */
+ WARN_ON(!init_stack_slab(&prealloc));
+ }
+
+ spin_unlock_irqrestore(&depot_lock, flags);
+exit:
+ if (prealloc) {
+ /* Nobody used this memory, ok to free it. */
+ free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+ }
+ if (found)
+ retval = found->handle.handle;
+fast_exit:
+ return retval;
+}
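A minimal sketch of how a client uses the API this file provides (mirroring
what the page_owner changes later in this patch do); the entry-array depth
and the helper name are arbitrary.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t example_record_stack(gfp_t flags)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
		.skip = 0,
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	handle = depot_save_stack(&trace, flags);	/* returns 0 on failure */
	if (handle) {
		struct stack_trace fetched = { 0 };

		depot_fetch_stack(handle, &fetched);
		print_stack_trace(&fetched, 0);	/* dump the deduplicated trace */
	}
	return handle;
}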
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 76f29ecba8f4..771234d050c7 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -452,11 +452,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
- * For mappings greater than a page, we limit the stride (and
- * hence alignment) to a page size.
+ * For mappings greater than or equal to a page, we limit the stride
+ * (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > PAGE_SIZE)
+ if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
diff --git a/mm/compaction.c b/mm/compaction.c
index 87ed50835881..f96a58e1843a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -19,6 +19,7 @@
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/page_owner.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -59,13 +60,27 @@ static unsigned long release_freepages(struct list_head *freelist)
static void map_pages(struct list_head *list)
{
- struct page *page;
+ unsigned int i, order, nr_pages;
+ struct page *page, *next;
+ LIST_HEAD(tmp_list);
+
+ list_for_each_entry_safe(page, next, list, lru) {
+ list_del(&page->lru);
- list_for_each_entry(page, list, lru) {
- kasan_alloc_pages(page, 0);
- arch_alloc_page(page, 0);
- kernel_map_pages(page, 1, 1);
+ order = page_private(page);
+ nr_pages = 1 << order;
+
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ if (order)
+ split_page(page, order);
+
+ for (i = 0; i < nr_pages; i++) {
+ list_add(&page->lru, &tmp_list);
+ page++;
+ }
}
+
+ list_splice(&tmp_list, list);
}
static inline bool migrate_async_suitable(int migratetype)
@@ -442,12 +457,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
unsigned long flags = 0;
bool locked = false;
unsigned long blockpfn = *start_pfn;
+ unsigned int order;
cursor = pfn_to_page(blockpfn);
/* Isolate free pages. */
for (; blockpfn < end_pfn; blockpfn++, cursor++) {
- int isolated, i;
+ int isolated;
struct page *page = cursor;
/*
@@ -513,17 +529,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
goto isolate_fail;
}
- /* Found a free page, break it into order-0 pages */
- isolated = split_free_page(page);
+ /* Found a free page, will break it into order-0 pages */
+ order = page_order(page);
+ isolated = __isolate_free_page(page, order);
if (!isolated)
break;
+ set_page_private(page, order);
total_isolated += isolated;
cc->nr_freepages += isolated;
- for (i = 0; i < isolated; i++) {
- list_add(&page->lru, freelist);
- page++;
- }
+ list_add_tail(&page->lru, freelist);
+
if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
blockpfn += isolated;
break;
@@ -636,7 +652,7 @@ isolate_freepages_range(struct compact_control *cc,
*/
}
- /* split_free_page does not map the pages */
+ /* __isolate_free_page() does not map the pages */
map_pages(&freelist);
if (pfn < end_pfn) {
@@ -1085,7 +1101,7 @@ static void isolate_freepages(struct compact_control *cc)
}
}
- /* split_free_page does not map the pages */
+ /* __isolate_free_page() does not map the pages */
map_pages(freelist);
/*
diff --git a/mm/debug.c b/mm/debug.c
index 5cf26f8c69a3..3621385c09ac 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -9,6 +9,18 @@
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
+#include <linux/migrate.h>
+#include <linux/page_owner.h>
+
+char *migrate_reason_names[MR_TYPES] = {
+ "compaction",
+ "memory_failure",
+ "memory_hotplug",
+ "syscall_or_cpuset",
+ "mempolicy_mbind",
+ "numa_misplaced",
+ "cma",
+};
static const struct trace_print_flags pageflag_names[] = {
{1UL << PG_locked, "locked" },
@@ -106,6 +118,7 @@ void dump_page_badflags(struct page *page, const char *reason,
void dump_page(struct page *page, const char *reason)
{
dump_page_badflags(page, reason, 0);
+ dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 47b469663822..6c6f5ccfcda1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1363,8 +1363,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
if (unlikely(pmd_trans_migrating(*pmdp))) {
page = pmd_page(*pmdp);
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(ptl);
wait_on_page_locked(page);
+ put_page(page);
goto out;
}
@@ -1396,8 +1399,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(ptl);
wait_on_page_locked(page);
+ put_page(page);
page_nid = -1;
goto out;
}
diff --git a/mm/internal.h b/mm/internal.h
index 7b9e313d9dea..46d27f378885 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
+extern void post_alloc_hook(struct page *page, unsigned int order,
+ gfp_t gfp_flags);
extern int user_min_free_kbytes;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 5d8dffd5b57c..786176b1a0ee 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
l = list_lru_from_kmem(nlru, item);
list_add_tail(item, &l->list);
l->nr_items++;
+ nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
}
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
l = list_lru_from_kmem(nlru, item);
list_del_init(item);
l->nr_items--;
+ nlru->nr_items--;
spin_unlock(&nlru->lock);
return true;
}
@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
- long count = 0;
- int memcg_idx;
+ struct list_lru_node *nlru;
- count += __list_lru_count_one(lru, nid, -1);
- if (list_lru_memcg_aware(lru)) {
- for_each_memcg_cache_index(memcg_idx)
- count += __list_lru_count_one(lru, nid, memcg_idx);
- }
- return count;
+ nlru = &lru->node[nid];
+ return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
@@ -226,6 +223,7 @@ restart:
assert_spin_locked(&nlru->lock);
case LRU_REMOVED:
isolated++;
+ nlru->nr_items--;
/*
* If the lru lock has been dropped, our list
* traversal is now invalid and so we have to
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a5ccfc71afc..85af2816b6d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,6 +39,7 @@
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
+#include <linux/page_owner.h>
#include <asm/tlbflush.h>
@@ -668,6 +669,8 @@ void migrate_page_copy(struct page *newpage, struct page *page)
*/
if (PageWriteback(newpage))
end_page_writeback(newpage);
+
+ copy_page_owner(page, newpage);
}
EXPORT_SYMBOL(migrate_page_copy);
@@ -1097,6 +1100,9 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
goto out;
rc = __unmap_and_move(page, newpage, force, mode);
+ if (rc == MIGRATEPAGE_SUCCESS) {
+ set_page_owner_migrate_reason(newpage, reason);
+ }
out:
if (rc != -EAGAIN) {
@@ -1179,7 +1185,7 @@ put_new:
static int unmap_and_move_huge_page(new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
struct page *hpage, int force,
- enum migrate_mode mode)
+ enum migrate_mode mode, int reason)
{
int rc = -EAGAIN;
int *result = NULL;
@@ -1237,6 +1243,7 @@ put_anon:
if (rc == MIGRATEPAGE_SUCCESS) {
hugetlb_cgroup_migrate(hpage, new_hpage);
put_new_page = NULL;
+ set_page_owner_migrate_reason(new_hpage, reason);
}
unlock_page(hpage);
@@ -1311,7 +1318,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
if (PageHuge(page))
rc = unmap_and_move_huge_page(get_new_page,
put_new_page, private, page,
- pass > 2, mode);
+ pass > 2, mode, reason);
else
rc = unmap_and_move(get_new_page, put_new_page,
private, page, pass > 2, mode,
@@ -2001,6 +2008,7 @@ fail_putback:
set_page_memcg(new_page, page_memcg(page));
set_page_memcg(page, NULL);
page_remove_rmap(page);
+ set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4fbb23c1cba7..6759192e69de 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -223,6 +223,20 @@ static char * const zone_names[MAX_NR_ZONES] = {
};
static void free_compound_page(struct page *page);
+
+char * const migratetype_names[MIGRATE_TYPES] = {
+ "Unmovable",
+ "Movable",
+ "Reclaimable",
+#ifdef CONFIG_CMA
+ "CMA",
+#endif
+ "HighAtomic",
+#ifdef CONFIG_MEMORY_ISOLATION
+ "Isolate",
+#endif
+};
+
compound_page_dtor * const compound_page_dtors[] = {
NULL,
free_compound_page,
@@ -459,6 +473,7 @@ static void bad_page(struct page *page, const char *reason,
printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
current->comm, page_to_pfn(page));
dump_page_badflags(page, reason, bad_flags);
+ dump_page_owner(page);
print_modules();
dump_stack();
@@ -569,6 +584,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
INIT_LIST_HEAD(&page->lru);
@@ -586,6 +604,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
set_page_private(page, 0);
@@ -1422,8 +1443,21 @@ static inline bool free_pages_prezeroed(void)
page_poisoning_enabled();
}
+inline void post_alloc_hook(struct page *page, unsigned int order,
+ gfp_t gfp_flags)
+{
+ set_page_private(page, 0);
+ set_page_refcounted(page);
+
+ kasan_alloc_pages(page, order);
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+ kernel_poison_pages(page, 1 << order, 1);
+ set_page_owner(page, order, gfp_flags);
+}
+
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
- int alloc_flags)
+ int alloc_flags)
{
int i;
@@ -1433,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
return 1;
}
- set_page_private(page, 0);
- set_page_refcounted(page);
-
- kasan_alloc_pages(page, order);
- arch_alloc_page(page, order);
- kernel_map_pages(page, 1 << order, 1);
- kernel_poison_pages(page, 1 << order, 1);
+ post_alloc_hook(page, order, gfp_flags);
if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
@@ -1448,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
- set_page_owner(page, order, gfp_flags);
-
/*
* page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
@@ -2217,7 +2243,6 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
void split_page(struct page *page, unsigned int order)
{
int i;
- gfp_t gfp_mask;
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2231,12 +2256,9 @@ void split_page(struct page *page, unsigned int order)
split_page(virt_to_page(page[0].shadow), order);
#endif
- gfp_mask = get_page_owner_gfp(page);
- set_page_owner(page, 0, gfp_mask);
- for (i = 1; i < (1 << order); i++) {
+ for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
- set_page_owner(page + i, 0, gfp_mask);
- }
+ split_page_owner(page, order);
}
EXPORT_SYMBOL_GPL(split_page);
@@ -2266,8 +2288,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
zone->free_area[order].nr_free--;
rmv_page_order(page);
- set_page_owner(page, order, __GFP_MOVABLE);
-
/* Set the pageblock if the isolated page is at least a pageblock */
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
@@ -2285,33 +2305,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
/*
- * Similar to split_page except the page is already free. As this is only
- * being used for migration, the migratetype of the block also changes.
- * As this is called with interrupts disabled, the caller is responsible
- * for calling arch_alloc_page() and kernel_map_page() after interrupts
- * are enabled.
- *
- * Note: this is probably too low level an operation for use in drivers.
- * Please consult with lkml before using this in your driver.
- */
-int split_free_page(struct page *page)
-{
- unsigned int order;
- int nr_pages;
-
- order = page_order(page);
-
- nr_pages = __isolate_free_page(page, order);
- if (!nr_pages)
- return 0;
-
- /* Split into individual pages */
- set_page_refcounted(page);
- split_page(page, order);
- return nr_pages;
-}
-
-/*
* Allocate a page from the given zone. Use pcplists for order-0 allocations.
*/
static inline
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3ecd3807c2c2..efb6c3c38c01 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,7 @@
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/kasan.h>
+#include <linux/page_owner.h>
#include "internal.h"
static int set_migratetype_isolate(struct page *page,
@@ -106,10 +107,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
if (pfn_valid_within(page_to_pfn(buddy)) &&
!is_migrate_isolate_page(buddy)) {
__isolate_free_page(page, order);
- kasan_alloc_pages(page, order);
- arch_alloc_page(page, order);
- kernel_map_pages(page, (1 << order), 1);
- set_page_refcounted(page);
isolated_page = page;
}
}
@@ -128,8 +125,10 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
zone->nr_isolate_pageblock--;
out:
spin_unlock_irqrestore(&zone->lock, flags);
- if (isolated_page)
+ if (isolated_page) {
+ post_alloc_hook(page, order, __GFP_MOVABLE);
__free_pages(isolated_page, order);
+ }
}
static inline struct page *
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 45cccaa1ce32..3a9a358e7c63 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -5,11 +5,24 @@
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
+#include <linux/jump_label.h>
+#include <linux/migrate.h>
+#include <linux/stackdepot.h>
+
#include "internal.h"
+/*
+ * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
+ * to use off stack temporal storage
+ */
+#define PAGE_OWNER_STACK_DEPTH (16)
+
static bool page_owner_disabled =
!IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
-bool page_owner_inited __read_mostly;
+DEFINE_STATIC_KEY_FALSE(page_owner_inited);
+
+static depot_stack_handle_t dummy_handle;
+static depot_stack_handle_t failure_handle;
static void init_early_allocated_pages(void);
@@ -36,12 +49,42 @@ static bool need_page_owner(void)
return true;
}
+static noinline void register_dummy_stack(void)
+{
+ unsigned long entries[4];
+ struct stack_trace dummy;
+
+ dummy.nr_entries = 0;
+ dummy.max_entries = ARRAY_SIZE(entries);
+ dummy.entries = &entries[0];
+ dummy.skip = 0;
+
+ save_stack_trace(&dummy);
+ dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
+}
+
+static noinline void register_failure_stack(void)
+{
+ unsigned long entries[4];
+ struct stack_trace failure;
+
+ failure.nr_entries = 0;
+ failure.max_entries = ARRAY_SIZE(entries);
+ failure.entries = &entries[0];
+ failure.skip = 0;
+
+ save_stack_trace(&failure);
+ failure_handle = depot_save_stack(&failure, GFP_KERNEL);
+}
+
static void init_page_owner(void)
{
if (page_owner_disabled)
return;
- page_owner_inited = true;
+ register_dummy_stack();
+ register_failure_stack();
+ static_branch_enable(&page_owner_inited);
init_early_allocated_pages();
}
@@ -57,46 +100,141 @@ void __reset_page_owner(struct page *page, unsigned int order)
for (i = 0; i < (1 << order); i++) {
page_ext = lookup_page_ext(page + i);
+ if (unlikely(!page_ext))
+ continue;
__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
}
-void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
+static inline bool check_recursive_alloc(struct stack_trace *trace,
+ unsigned long ip)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ int i, count;
+
+ if (!trace->nr_entries)
+ return false;
+
+ for (i = 0, count = 0; i < trace->nr_entries; i++) {
+ if (trace->entries[i] == ip && ++count == 2)
+ return true;
+ }
+
+ return false;
+}
+
+static noinline depot_stack_handle_t save_stack(gfp_t flags)
+{
+ unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
.nr_entries = 0,
- .max_entries = ARRAY_SIZE(page_ext->trace_entries),
- .entries = &page_ext->trace_entries[0],
- .skip = 3,
+ .entries = entries,
+ .max_entries = PAGE_OWNER_STACK_DEPTH,
+ .skip = 0
};
+ depot_stack_handle_t handle;
save_stack_trace(&trace);
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries-1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ /*
+ * We need to check recursion here because our request to stackdepot
+	 * could trigger a memory allocation to save the new entry. That new
+	 * allocation would reach here and call depot_save_stack() again
+	 * if we don't catch it. Since there is still not enough memory in
+	 * stackdepot, it would try to allocate memory again and loop forever.
+ */
+ if (check_recursive_alloc(&trace, _RET_IP_))
+ return dummy_handle;
+
+ handle = depot_save_stack(&trace, flags);
+ if (!handle)
+ handle = failure_handle;
+
+ return handle;
+}
+
+noinline void __set_page_owner(struct page *page, unsigned int order,
+ gfp_t gfp_mask)
+{
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+ page_ext->handle = save_stack(gfp_mask);
page_ext->order = order;
page_ext->gfp_mask = gfp_mask;
- page_ext->nr_entries = trace.nr_entries;
+ page_ext->last_migrate_reason = -1;
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
-gfp_t __get_page_owner_gfp(struct page *page)
+void __set_page_owner_migrate_reason(struct page *page, int reason)
+{
+ struct page_ext *page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
+ page_ext->last_migrate_reason = reason;
+}
+
+void __split_page_owner(struct page *page, unsigned int order)
{
+ int i;
struct page_ext *page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ /*
+		 * The caller just returns if there is no valid gfp,
+		 * so return here too.
+ */
+ return;
- return page_ext->gfp_mask;
+ page_ext->order = 0;
+ for (i = 1; i < (1 << order); i++)
+ __copy_page_owner(page, page + i);
+}
+
+void __copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+ struct page_ext *old_ext = lookup_page_ext(oldpage);
+ struct page_ext *new_ext = lookup_page_ext(newpage);
+
+ if (unlikely(!old_ext || !new_ext))
+ return;
+
+ new_ext->order = old_ext->order;
+ new_ext->gfp_mask = old_ext->gfp_mask;
+ new_ext->last_migrate_reason = old_ext->last_migrate_reason;
+ new_ext->handle = old_ext->handle;
+
+ /*
+ * We don't clear the bit on the oldpage as it's going to be freed
+ * after migration. Until then, the info can be useful in case of
+	 * a bug, and the overall stats will be off a bit only temporarily.
+ * Also, migrate_misplaced_transhuge_page() can still fail the
+ * migration and then we want the oldpage to retain the info. But
+ * in that case we also don't need to explicitly clear the info from
+ * the new page, which will be freed.
+ */
+ __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
- struct page *page, struct page_ext *page_ext)
+ struct page *page, struct page_ext *page_ext,
+ depot_stack_handle_t handle)
{
int ret;
int pageblock_mt, page_mt;
char *kbuf;
+ unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
- .nr_entries = page_ext->nr_entries,
- .entries = &page_ext->trace_entries[0],
+ .nr_entries = 0,
+ .entries = entries,
+ .max_entries = PAGE_OWNER_STACK_DEPTH,
+ .skip = 0
};
kbuf = kmalloc(count, GFP_KERNEL);
@@ -104,8 +242,9 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
return -ENOMEM;
ret = snprintf(kbuf, count,
- "Page allocated via order %u, mask 0x%x\n",
- page_ext->order, page_ext->gfp_mask);
+ "Page allocated via order %u, mask %#x(%pGg)\n",
+ page_ext->order, page_ext->gfp_mask,
+ &page_ext->gfp_mask);
if (ret >= count)
goto err;
@@ -114,31 +253,29 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
pageblock_mt = get_pfnblock_migratetype(page, pfn);
page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
ret += snprintf(kbuf + ret, count - ret,
- "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
pfn,
+ migratetype_names[page_mt],
pfn >> pageblock_order,
- pageblock_mt,
- pageblock_mt != page_mt ? "Fallback" : " ",
- PageLocked(page) ? "K" : " ",
- PageError(page) ? "E" : " ",
- PageReferenced(page) ? "R" : " ",
- PageUptodate(page) ? "U" : " ",
- PageDirty(page) ? "D" : " ",
- PageLRU(page) ? "L" : " ",
- PageActive(page) ? "A" : " ",
- PageSlab(page) ? "S" : " ",
- PageWriteback(page) ? "W" : " ",
- PageCompound(page) ? "C" : " ",
- PageSwapCache(page) ? "B" : " ",
- PageMappedToDisk(page) ? "M" : " ");
+ migratetype_names[pageblock_mt],
+ page->flags, &page->flags);
if (ret >= count)
goto err;
+ depot_fetch_stack(handle, &trace);
ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
if (ret >= count)
goto err;
+ if (page_ext->last_migrate_reason != -1) {
+ ret += snprintf(kbuf + ret, count - ret,
+ "Page has been migrated, last migrate reason: %s\n",
+ migrate_reason_names[page_ext->last_migrate_reason]);
+ if (ret >= count)
+ goto err;
+ }
+
ret += snprintf(kbuf + ret, count - ret, "\n");
if (ret >= count)
goto err;
@@ -154,14 +291,58 @@ err:
return -ENOMEM;
}
+void __dump_page_owner(struct page *page)
+{
+ struct page_ext *page_ext = lookup_page_ext(page);
+ unsigned long entries[PAGE_OWNER_STACK_DEPTH];
+ struct stack_trace trace = {
+ .nr_entries = 0,
+ .entries = entries,
+ .max_entries = PAGE_OWNER_STACK_DEPTH,
+ .skip = 0
+ };
+ depot_stack_handle_t handle;
+ gfp_t gfp_mask;
+ int mt;
+
+ if (unlikely(!page_ext)) {
+		pr_alert("There is no page extension available.\n");
+ return;
+ }
+ gfp_mask = page_ext->gfp_mask;
+ mt = gfpflags_to_migratetype(gfp_mask);
+
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
+ pr_alert("page_owner info is not active (free page?)\n");
+ return;
+ }
+
+ handle = READ_ONCE(page_ext->handle);
+ if (!handle) {
+ pr_alert("page_owner info is not active (free page?)\n");
+ return;
+ }
+
+ depot_fetch_stack(handle, &trace);
+ pr_alert("page allocated via order %u, migratetype %s, "
+ "gfp_mask %#x(%pGg)\n", page_ext->order,
+ migratetype_names[mt], gfp_mask, &gfp_mask);
+ print_stack_trace(&trace, 0);
+
+ if (page_ext->last_migrate_reason != -1)
+ pr_alert("page has been migrated, last migrate reason: %s\n",
+ migrate_reason_names[page_ext->last_migrate_reason]);
+}
+
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
unsigned long pfn;
struct page *page;
struct page_ext *page_ext;
+ depot_stack_handle_t handle;
- if (!page_owner_inited)
+ if (!static_branch_unlikely(&page_owner_inited))
return -EINVAL;
page = NULL;
@@ -198,6 +379,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
}
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
/*
* Some pages could be missed by concurrent allocation or free,
@@ -206,10 +389,19 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
+ /*
+ * Access to page_ext->handle isn't synchronized, so we should
+ * be careful when accessing it.
+ */
+ handle = READ_ONCE(page_ext->handle);
+ if (!handle)
+ continue;
+
/* Record the next PFN to read in the file offset */
*ppos = (pfn - min_low_pfn) + 1;
- return print_page_owner(buf, count, pfn, page, page_ext);
+ return print_page_owner(buf, count, pfn, page,
+ page_ext, handle);
}
return 0;
@@ -248,6 +440,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
page = pfn_to_page(pfn);
+ if (page_zone(page) != zone)
+ continue;
+
/*
* We are safe to check buddy flag and order, because
* this is init stage and only single thread runs.
@@ -261,6 +456,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
continue;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
/* Maybe overlapping zone */
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
@@ -309,7 +506,7 @@ static int __init pageowner_init(void)
{
struct dentry *dentry;
- if (!page_owner_inited) {
+ if (!static_branch_unlikely(&page_owner_inited)) {
pr_info("page_owner is disabled\n");
return 0;
}
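
print_page_owner() above builds its record with repeated snprintf() calls, bailing out as soon as ret >= count so a truncated record is reported as -ENOMEM instead of overrunning kbuf. A standalone user-space sketch of that accumulation pattern (field values are illustrative, not kernel output):

#include <stdio.h>

/* Append formatted fields into buf, stopping cleanly on truncation. */
static int format_record(char *buf, size_t count)
{
    int ret;

    ret = snprintf(buf, count, "order %u, mask %#x\n", 3u, 0x14200cau);
    if (ret >= (int)count)
        return -1;          /* would have overflowed: give up */

    ret += snprintf(buf + ret, count - ret, "pfn %lu block %lu\n",
                    123456ul, 123456ul >> 9);
    if (ret >= (int)count)
        return -1;

    ret += snprintf(buf + ret, count - ret, "\n");
    if (ret >= (int)count)
        return -1;

    return ret;             /* number of bytes written */
}

int main(void)
{
    char buf[128];
    int len = format_record(buf, sizeof(buf));

    if (len < 0)
        fprintf(stderr, "record truncated\n");
    else
        fwrite(buf, 1, len, stdout);
    return 0;
}

snprintf() returns the length the full string would have had, so ret >= count is exactly the truncation condition.
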
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 40dd0f9b00d6..09f733b0424a 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -205,6 +205,8 @@ void swap_cgroup_swapoff(int type)
struct page *page = map[i];
if (page)
__free_page(page);
+ if (!(i % SWAP_CLUSTER_MAX))
+ cond_resched();
}
vfree(map);
}
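
swap_cgroup_swapoff() above drops a cond_resched() into its free loop every SWAP_CLUSTER_MAX iterations so a huge swap map cannot hog the CPU. A user-space analogue of that batched yield (sched_yield() standing in for cond_resched(), batch size illustrative):

#include <sched.h>
#include <stdio.h>

#define BATCH 32    /* stand-in for SWAP_CLUSTER_MAX */

int main(void)
{
    for (long i = 0; i < 100000; i++) {
        /* ... per-entry work would go here ... */

        /* Give other tasks a chance to run every BATCH entries. */
        if (!(i % BATCH))
            sched_yield();
    }
    printf("done\n");
    return 0;
}
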
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d6b4817c4416..3c0796cd3f80 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -927,19 +927,6 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
#endif
#ifdef CONFIG_PROC_FS
-static char * const migratetype_names[MIGRATE_TYPES] = {
- "Unmovable",
- "Movable",
- "Reclaimable",
-#ifdef CONFIG_CMA
- "CMA",
-#endif
- "HighAtomic",
-#ifdef CONFIG_MEMORY_ISOLATION
- "Isolate",
-#endif
-};
-
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
struct zone *zone)
{
@@ -1103,6 +1090,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
continue;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
@@ -1140,7 +1129,7 @@ static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
#ifdef CONFIG_PAGE_OWNER
int mtype;
- if (!page_owner_inited)
+ if (!static_branch_unlikely(&page_owner_inited))
return;
drain_all_pages(NULL);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ad8d6e6b87ca..e20ae2d3c498 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -278,7 +278,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
return 0;
out_free_newdev:
- free_netdev(new_dev);
+ if (new_dev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(new_dev);
return err;
}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fcc220c..71b6ab240dea 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
{
struct sk_buff *skb;
- if (likely(in_interrupt()))
- skb = alloc_skb(len + pfx, GFP_ATOMIC);
- else
- skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
if (unlikely(skb == NULL))
return NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index 50e77fe096f4..9a3aaba15c5a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1248,8 +1248,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
if (!new_ifalias)
return -ENOMEM;
dev->ifalias = new_ifalias;
+ memcpy(dev->ifalias, alias, len);
+ dev->ifalias[len] = 0;
- strlcpy(dev->ifalias, alias, len+1);
return len;
}
@@ -4560,8 +4561,13 @@ static void net_rps_send_ipi(struct softnet_data *remsd)
while (remsd) {
struct softnet_data *next = remsd->rps_ipi_next;
- if (cpu_online(remsd->cpu))
+ if (cpu_online(remsd->cpu)) {
smp_call_function_single_async(remsd->cpu, &remsd->csd);
+ } else {
+ rps_lock(remsd);
+ remsd->backlog.state = 0;
+ rps_unlock(remsd);
+ }
remsd = next;
}
#endif
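
The dev_set_alias() change above copies exactly len bytes and terminates the string itself, because strlcpy() keeps reading the source until it finds a NUL, and the alias handed in over netlink is not guaranteed to be NUL-terminated. A user-space sketch of the safe variant (names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy a length-delimited, possibly unterminated buffer into a C string. */
static char *copy_alias(const char *alias, size_t len)
{
    char *dst = malloc(len + 1);

    if (!dst)
        return NULL;
    memcpy(dst, alias, len);    /* read exactly len bytes, no more */
    dst[len] = '\0';            /* terminate explicitly */
    return dst;
}

int main(void)
{
    char raw[4] = { 'e', 't', 'h', '0' };   /* no trailing NUL */
    char *alias = copy_alias(raw, sizeof(raw));

    if (alias) {
        printf("%s\n", alias);
        free(alias);
    }
    return 0;
}
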
diff --git a/net/core/dst.c b/net/core/dst.c
index d7ad628bf64e..e72d706f8d0c 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -462,6 +462,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
spin_lock_bh(&dst_garbage.lock);
dst = dst_garbage.list;
dst_garbage.list = NULL;
+ /* The code in dst_ifdown places a hold on the loopback device.
+ * If the gc entry processing is set to expire after a lengthy
+ * interval, this hold can cause netdev_wait_allrefs() to hang
* out and wait for a long time -- until the loopback
+ * interface is released. If we're really unlucky, it'll emit
+ * pr_emerg messages to console too. Reset the interval here,
+ * so dst cleanups occur in a more timely fashion.
+ */
+ if (dst_garbage.timer_inc > DST_GC_INC) {
+ dst_garbage.timer_inc = DST_GC_INC;
+ dst_garbage.timer_expires = DST_GC_MIN;
+ mod_delayed_work(system_wq, &dst_gc_work,
+ dst_garbage.timer_expires);
+ }
spin_unlock_bh(&dst_garbage.lock);
if (last)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d43544ce7550..2ec5324a7ff7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -897,6 +897,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
+ nla_total_size(4) /* IFLA_LINK_NETNSID */
+ + nla_total_size(4) /* IFLA_GROUP */
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1089,6 +1090,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
struct ifla_vf_mac vf_mac;
struct ifla_vf_info ivi;
+ memset(&ivi, 0, sizeof(ivi));
+
/* Not all SR-IOV capable drivers support the
* spoofcheck and "RSS query enable" query. Preset to
* -1 so the user space tool can detect that the driver
@@ -1097,7 +1100,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
ivi.spoofchk = -1;
ivi.rss_query_en = -1;
ivi.trusted = -1;
- memset(ivi.mac, 0, sizeof(ivi.mac));
/* The default value for VF link state is "auto"
* IFLA_VF_LINK_STATE_AUTO which equals zero
*/
@@ -1370,6 +1372,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_LINK_NETNSID] = { .type = NLA_S32 },
[IFLA_PROTO_DOWN] = { .type = NLA_U8 },
+ [IFLA_GROUP] = { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index b1dc096d22f8..403593bd2b83 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
-static inline void dnrt_drop(struct dn_route *rt)
-{
- dst_release(&rt->dst);
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
static void dn_dst_check_expire(unsigned long dummy)
{
int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
}
*rtp = rt->dst.dn_next;
rt->dst.dn_next = NULL;
- dnrt_drop(rt);
+ dnrt_free(rt);
break;
}
spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
dst_use(&rth->dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
- dnrt_drop(rt);
+ dst_free(&rt->dst);
*rp = rth;
return 0;
}
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
for(; rt; rt = next) {
next = rcu_dereference_raw(rt->dst.dn_next);
RCU_INIT_POINTER(rt->dst.dn_next, NULL);
- dst_free((struct dst_entry *)rt);
+ dnrt_free(rt);
}
nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 85f2fdc360c2..29246bc9a7b4 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+ if (skb->len < sizeof(*nlh) ||
+ nlh->nlmsg_len < sizeof(*nlh) ||
+ skb->len < nlh->nlmsg_len)
return;
if (!netlink_capable(skb, CAP_NET_ADMIN))
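
dnrmg_receive_user_skb() now checks that the buffer is at least as large as the netlink header before it reads nlmsg_len from it; only then can the claimed message length be compared against what was actually received. A standalone sketch of that ordering (the struct and sizes are illustrative, not the netlink layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr {
    uint32_t len;       /* length claimed by the sender */
    uint16_t type;
    uint16_t flags;
};

/* Reject the message unless every length check passes, in order. */
static bool msg_ok(const void *buf, size_t buflen)
{
    struct msg_hdr hdr;

    if (buflen < sizeof(hdr))       /* header itself must be present */
        return false;
    memcpy(&hdr, buf, sizeof(hdr));
    if (hdr.len < sizeof(hdr))      /* claimed length can't be shorter */
        return false;
    if (buflen < hdr.len)           /* and must fit in what we received */
        return false;
    return true;
}

int main(void)
{
    struct msg_hdr good = { .len = sizeof(struct msg_hdr) };
    char short_buf[2] = { 0 };

    printf("good: %d\n", msg_ok(&good, sizeof(good)));
    printf("short: %d\n", msg_ok(short_buf, sizeof(short_buf)));
    return 0;
}

The order matters: the claimed length cannot be trusted until the header it lives in is known to be present.
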
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8dfe9fb7ad36..554c2a961ad5 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1006,10 +1006,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
/* Use already configured phy mode */
if (p->phy_interface == PHY_INTERFACE_MODE_NA)
p->phy_interface = p->phy->interface;
- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
- p->phy_interface);
-
- return 0;
+ return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+ p->phy_interface);
}
static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 17adfdaf5795..3809d523d012 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1102,6 +1102,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
if (!pmc)
return;
+ spin_lock_init(&pmc->lock);
spin_lock_bh(&im->lock);
pmc->interface = im->interface;
in_dev_hold(in_dev);
@@ -2026,21 +2027,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
- struct ip_sf_list *psf, *nextpsf;
+ struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
- for (psf = pmc->tomb; psf; psf = nextpsf) {
+ spin_lock_bh(&pmc->lock);
+ tomb = pmc->tomb;
+ pmc->tomb = NULL;
+ sources = pmc->sources;
+ pmc->sources = NULL;
+ pmc->sfmode = MCAST_EXCLUDE;
+ pmc->sfcount[MCAST_INCLUDE] = 0;
+ pmc->sfcount[MCAST_EXCLUDE] = 1;
+ spin_unlock_bh(&pmc->lock);
+
+ for (psf = tomb; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
- pmc->tomb = NULL;
- for (psf = pmc->sources; psf; psf = nextpsf) {
+ for (psf = sources; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
- pmc->sources = NULL;
- pmc->sfmode = MCAST_EXCLUDE;
- pmc->sfcount[MCAST_INCLUDE] = 0;
- pmc->sfcount[MCAST_EXCLUDE] = 1;
}
/* Join a multicast group
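
ip_mc_clear_src() above detaches the tomb and source lists while holding pmc->lock and only frees the nodes after dropping it, so concurrent readers never see a half-freed list and the free loop runs lock-free. A pthread-based sketch of that detach-then-free pattern (list and lock names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    int val;
    struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void list_clear(void)
{
    struct node *head, *n, *next;

    /* Detach the whole list under the lock... */
    pthread_mutex_lock(&list_lock);
    head = list_head;
    list_head = NULL;
    pthread_mutex_unlock(&list_lock);

    /* ...and free the nodes without holding it. */
    for (n = head; n; n = next) {
        next = n->next;
        free(n);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));

        n->val = i;
        n->next = list_head;
        list_head = n;
    }
    list_clear();
    printf("list cleared\n");
    return 0;
}
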
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4fe0359d8abf..55e928819846 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -320,9 +320,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
unsigned long delay)
{
- if (!delayed_work_pending(&ifp->dad_work))
- in6_ifa_hold(ifp);
- mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+ in6_ifa_hold(ifp);
+ if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+ in6_ifa_put(ifp);
}
static int snmp6_alloc_dev(struct inet6_dev *idev)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 27796c33ca05..d7c1ee7cf0e2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -184,6 +184,11 @@ ipv4_connected:
err = 0;
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
+ /* Reset daddr and dport so that udp_v6_early_demux()
+ * fails to find this socket
+ */
+ memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+ inet->inet_dport = 0;
goto out;
}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index ed33abf57abd..9ac4f0cef27d 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@ struct fib6_rule {
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
- struct rt6_info *rt;
struct fib_lookup_arg arg = {
.lookup_ptr = lookup,
.flags = FIB_LOOKUP_NOREF,
@@ -41,21 +40,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
- rt = arg.result;
+ if (arg.result)
+ return arg.result;
- if (!rt) {
- dst_hold(&net->ipv6.ip6_null_entry->dst);
- return &net->ipv6.ip6_null_entry->dst;
- }
-
- if (rt->rt6i_flags & RTF_REJECT &&
- rt->dst.error == -EAGAIN) {
- ip6_rt_put(rt);
- rt = net->ipv6.ip6_null_entry;
- dst_hold(&rt->dst);
- }
-
- return &rt->dst;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return &net->ipv6.ip6_null_entry->dst;
}
static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -116,7 +105,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
flp6->saddr = saddr;
}
err = rt->dst.error;
- goto out;
+ if (err != -EAGAIN)
+ goto out;
}
again:
ip6_rt_put(rt);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 85bf86458706..1ac06723f0d7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -290,8 +290,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
struct rt6_info *rt;
rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
- if (rt->rt6i_flags & RTF_REJECT &&
- rt->dst.error == -EAGAIN) {
+ if (rt->dst.error == -EAGAIN) {
ip6_rt_put(rt);
rt = net->ipv6.ip6_null_entry;
dst_hold(&rt->dst);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef745df36fc8..72b53b83a720 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1004,8 +1004,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
}
#endif
if (ipv6_addr_v4mapped(&fl6->saddr) &&
- !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
- return -EAFNOSUPPORT;
+ !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
+ err = -EAFNOSUPPORT;
+ goto out_err_release;
+ }
return 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8a07c63ddccd..a8cabc876348 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -45,6 +45,7 @@
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
@@ -964,15 +965,22 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
int dif)
{
struct sock *sk;
+ struct hlist_nulls_node *hnode;
+ unsigned short hnum = ntohs(loc_port);
+ unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum);
+ unsigned int slot2 = hash2 & udp_table.mask;
+ struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
- rcu_read_lock();
- sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port,
- dif, &udp_table);
- if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
- sk = NULL;
- rcu_read_unlock();
+ const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
- return sk;
+ udp_portaddr_for_each_entry_rcu(sk, hnode, &hslot2->head) {
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+ return sk;
+ /* Only check first socket in chain */
+ break;
+ }
+ return NULL;
}
static void udp_v6_early_demux(struct sk_buff *skb)
@@ -997,7 +1005,7 @@ static void udp_v6_early_demux(struct sk_buff *skb)
else
return;
- if (!sk)
+ if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
return;
skb->sk = sk;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f9c9ecb0cdd3..e67c28e614b9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
goto out;
}
+ err = -ENOBUFS;
key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
if (sa->sadb_sa_auth) {
int keysize = 0;
@@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
- if (!x->aalg)
+ if (!x->aalg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->aalg->alg_name, a->name);
x->aalg->alg_key_len = 0;
if (key) {
@@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
goto out;
}
x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
- if (!x->calg)
+ if (!x->calg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->calg->alg_name, a->name);
x->props.calgo = sa->sadb_sa_encrypt;
} else {
@@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
- if (!x->ealg)
+ if (!x->ealg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->ealg->alg_name, a->name);
x->ealg->alg_key_len = 0;
if (key) {
@@ -1227,8 +1234,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
struct xfrm_encap_tmpl *natt;
x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
- if (!x->encap)
+ if (!x->encap) {
+ err = -ENOMEM;
goto out;
+ }
natt = x->encap;
n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
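
The af_key hunks above make each allocation failure set err = -ENOMEM explicitly before jumping to the common out label, instead of relying on whatever err happened to hold; the earlier err = -ENOBUFS only covers paths that never reach an allocation. A small sketch of that goto-based error handling (functions and fields are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct state {
    char *auth_key;
    char *enc_key;
};

static int build_state(struct state *s)
{
    int err = -EINVAL;          /* default for early validation */

    s->auth_key = malloc(32);
    if (!s->auth_key) {
        err = -ENOMEM;          /* set the precise error... */
        goto out;               /* ...before the common cleanup */
    }

    s->enc_key = malloc(32);
    if (!s->enc_key) {
        err = -ENOMEM;
        goto out;
    }
    return 0;

out:
    free(s->auth_key);
    free(s->enc_key);
    s->auth_key = s->enc_key = NULL;
    return err;
}

int main(void)
{
    struct state s = { 0 };

    printf("build_state: %d\n", build_state(&s));
    free(s.auth_key);
    free(s.enc_key);
    return 0;
}
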
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 175ffcf7fb06..2ee53dc1ddf7 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -891,12 +891,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
- if (sband->ht_cap.ht_supported)
- local->rx_chains =
- max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
- local->rx_chains);
+ if (!sband->ht_cap.ht_supported)
+ continue;
/* TODO: consider VHT for RX chains, hopefully it's the same */
+ local->rx_chains =
+ max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+ local->rx_chains);
+
+ /* no need to mask, SM_PS_DISABLED has all bits set */
+ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
}
/* if low-level driver supports AP, we also support VLAN */
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9f5272968abb..e565b2becb14 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,6 +45,8 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l4proto.h>
@@ -1798,6 +1800,8 @@ ctnetlink_create_conntrack(struct net *net,
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
+ nfct_seqadj_ext_add(ct);
+ nfct_synproxy_ext_add(ct);
/* we must add conntrack extensions before confirmation. */
ct->status |= IPS_CONFIRMED;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index b7c43def0dc6..00f798b20b20 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
tcp_hdrlen = tcph->doff * 4;
- if (len < tcp_hdrlen)
+ if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -156,6 +156,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (len > tcp_hdrlen)
return 0;
+ /* tcph->doff has 4 bits, do not wrap it to 0 */
+ if (tcp_hdrlen >= 15 * 4)
+ return 0;
+
/*
* MSS Option not found ?! add it..
*/
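
The TCPMSS change above bounds tcp_hdrlen on both sides: it must be at least sizeof(struct tcphdr), and if the header is already at 15 words the option is simply not added, since incrementing the 4-bit doff field would wrap it to 0. A standalone sketch of those checks on a raw doff value (constants mirror the TCP header layout):

#include <stdbool.h>
#include <stdio.h>

#define TCP_MIN_HDRLEN  20  /* sizeof(struct tcphdr) without options */
#define TCP_MAX_WORDS   15  /* doff is a 4-bit field of 32-bit words */

/* Can a 4-byte option be appended to a header with this doff? */
static bool can_add_option(unsigned int doff)
{
    unsigned int hdrlen = doff * 4;

    if (hdrlen < TCP_MIN_HDRLEN)        /* malformed header */
        return false;
    if (hdrlen >= TCP_MAX_WORDS * 4)    /* doff would wrap to 0 */
        return false;
    return true;
}

int main(void)
{
    printf("doff=5:  %d\n", can_add_option(5));     /* plain header: ok */
    printf("doff=15: %d\n", can_add_option(15));    /* full: must refuse */
    printf("doff=3:  %d\n", can_add_option(3));     /* too short: refuse */
    return 0;
}
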
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index da3cc09f683e..91d43ab3a961 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -215,7 +215,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, n_parts, loop, tmp;
+ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
/* there must be at least one name, and at least #names+1 length
* words */
@@ -245,16 +245,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->name_parts[loop])
return -ENOMEM;
memcpy(princ->name_parts[loop], xdr, tmp);
princ->name_parts[loop][tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
if (toklen < 4)
@@ -263,16 +263,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->realm)
return -ENOMEM;
memcpy(princ->realm, xdr, tmp);
princ->realm[tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
_debug("%s/...@%s", princ->name_parts[0], princ->realm);
@@ -291,7 +291,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one tag and one length word */
if (toklen <= 8)
@@ -305,15 +305,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
toklen -= 8;
if (len > max_data_size)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
td->data_len = len;
if (len > 0) {
td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
_debug("tag %x len %x", td->tag, td->data_len);
@@ -385,7 +387,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
const __be32 **_xdr, unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one length word */
if (toklen <= 4)
@@ -397,6 +399,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
toklen -= 4;
if (len > AFSTOKEN_K5_TIX_MAX)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
*_tktlen = len;
_debug("ticket len %u", len);
@@ -405,9 +410,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
*_ticket = kmemdup(xdr, len, GFP_KERNEL);
if (!*_ticket)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
*_xdr = xdr;
@@ -550,7 +554,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
{
const __be32 *xdr = prep->data, *token;
const char *cp;
- unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
size_t datalen = prep->datalen;
int ret;
@@ -576,22 +580,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
if (len < 1 || len > AFSTOKEN_CELL_MAX)
goto not_xdr;
datalen -= 4;
- tmp = (len + 3) & ~3;
- if (tmp > datalen)
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > datalen)
goto not_xdr;
cp = (const char *) xdr;
for (loop = 0; loop < len; loop++)
if (!isprint(cp[loop]))
goto not_xdr;
- if (len < tmp)
- for (; loop < tmp; loop++)
- if (cp[loop])
- goto not_xdr;
+ for (; loop < paddedlen; loop++)
+ if (cp[loop])
+ goto not_xdr;
_debug("cellname: [%u/%u] '%*.*s'",
- len, tmp, len, len, (const char *) xdr);
- datalen -= tmp;
- xdr += tmp >> 2;
+ len, paddedlen, len, len, (const char *) xdr);
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
/* get the token count */
if (datalen < 12)
@@ -612,10 +615,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
sec_ix = ntohl(*xdr);
datalen -= 4;
_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
- if (toklen < 20 || toklen > datalen)
+ paddedlen = (toklen + 3) & ~3;
+ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
goto not_xdr;
- datalen -= (toklen + 3) & ~3;
- xdr += (toklen + 3) >> 2;
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
} while (--loop > 0);
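
Each of the rxrpc hunks above computes the 4-byte-padded length up front, paddedlen = (len + 3) & ~3, and rejects the element if the padded size does not fit in the remaining token, rather than checking only the unpadded length and then consuming the padded one. A standalone sketch of that consumption pattern (buffer layout is illustrative; the length is kept in host byte order for simplicity, whereas real XDR is big-endian):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Consume one length-prefixed, 4-byte-aligned element from an XDR-style
 * stream.  Returns false if the element (including padding) would overrun
 * the remaining data.
 */
static bool xdr_pull(const uint8_t **p, size_t *remain,
                     char *out, size_t outmax)
{
    uint32_t len, paddedlen;

    if (*remain < 4)
        return false;
    memcpy(&len, *p, 4);
    *p += 4;
    *remain -= 4;

    paddedlen = (len + 3) & ~3u;    /* round up to a 4-byte boundary */
    if (len == 0 || len >= outmax || paddedlen > *remain)
        return false;               /* padded element must fit entirely */

    memcpy(out, *p, len);
    out[len] = '\0';
    *p += paddedlen;                /* skip the data and its padding */
    *remain -= paddedlen;
    return true;
}

int main(void)
{
    /* one element: len=5, "hello", 3 bytes of padding */
    uint8_t buf[] = { 5, 0, 0, 0, 'h', 'e', 'l', 'l', 'o', 0, 0, 0 };
    const uint8_t *p = buf;
    size_t remain = sizeof(buf);
    char out[16];

    if (xdr_pull(&p, &remain, out, sizeof(out)))
        printf("pulled '%s', %zu bytes left\n", out, remain);
    return 0;
}

Checking the padded length before copying means the skip at the end can never run past the buffer.
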
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 956141b71619..3ebf3b652d60 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
union sctp_addr *laddr = (union sctp_addr *)addr;
struct sctp_transport *transport;
- if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1f5d18d80fba..decd8b8d751f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -997,7 +997,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct path path = { NULL, NULL };
err = -EINVAL;
- if (sunaddr->sun_family != AF_UNIX)
+ if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+ sunaddr->sun_family != AF_UNIX)
goto out;
if (addr_len == sizeof(short)) {
@@ -1108,6 +1109,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
unsigned int hash;
int err;
+ err = -EINVAL;
+ if (alen < offsetofend(struct sockaddr, sa_family))
+ goto out;
+
if (addr->sa_family != AF_UNSPEC) {
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
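
Both unix_bind() and unix_dgram_connect() above verify that the caller-supplied address length covers at least the family field before reading it; a shorter sockaddr would otherwise be interpreted from uninitialized bytes. A user-space sketch of the same check (offsetofend is open-coded here because it is a kernel helper):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>

/* End offset of a struct member: offsetof() plus the member's size. */
#define offsetofend(type, member) \
    (offsetof(type, member) + sizeof(((type *)0)->member))

static bool addr_ok(const struct sockaddr_un *addr, socklen_t addr_len)
{
    /* The family field must lie fully inside the supplied length. */
    if (addr_len < offsetofend(struct sockaddr_un, sun_family))
        return false;
    return addr->sun_family == AF_UNIX;
}

int main(void)
{
    struct sockaddr_un un = { .sun_family = AF_UNIX };

    printf("full:  %d\n", addr_ok(&un, sizeof(un)));
    printf("short: %d\n", addr_ok(&un, 1));    /* too short: reject */
    return 0;
}
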
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 9ff010cee67e..1afd88a87e0f 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -731,7 +731,7 @@ country JO:
(5170 - 5250 @ 80), (23)
(5735 - 5835 @ 80), (23)
# 60 GHz band channels 1-4, ref: ETSI EN 302 567
- ((57000 - 66000 @ 2160), (40)
+ (57000 - 66000 @ 2160), (40)
country JP: DFS-JP
(2402 - 2482 @ 40), (20)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d5b4ac7bf0d8..0f45e1a3a7d1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1773,43 +1773,6 @@ free_dst:
goto out;
}
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
- if (!*target) {
- *target = kmalloc(size, GFP_ATOMIC);
- if (!*target)
- return -ENOMEM;
- }
-
- memcpy(*target, src, size);
- return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
- const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- return xfrm_dst_alloc_copy((void **)&(xdst->partner),
- sel, sizeof(*sel));
-#else
- return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
- const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
- return 0;
-#endif
-}
-
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
struct xfrm_policy **pols,
int *num_pols, int *num_xfrms)
@@ -1881,16 +1844,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
xdst = (struct xfrm_dst *)dst;
xdst->num_xfrms = err;
- if (num_pols > 1)
- err = xfrm_dst_update_parent(dst, &pols[1]->selector);
- else
- err = xfrm_dst_update_origin(dst, fl);
- if (unlikely(err)) {
- dst_free(dst);
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- return ERR_PTR(err);
- }
-
xdst->num_pols = num_pols;
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
xdst->policy_genid = atomic_read(&pols[0]->genid);
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
index 3a12dd172d04..b428e1b8b11c 100644
--- a/security/pfe/pfk_kc.c
+++ b/security/pfe/pfk_kc.c
@@ -527,10 +527,12 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED;
- if (async && (!strcmp(s_type,
- (char *)PFK_UFS)))
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
entry->loaded_ref_cnt++;
-
+ }
break;
}
case (FREE):
@@ -544,14 +546,16 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
entry->state = ACTIVE_ICE_LOADED;
/*
- * only increase ref cnt for async calls,
+ * In case of UFS only increase ref cnt for async calls,
* sync calls from within work thread do not pass
* requests further to HW
*/
- if (async && (!strcmp(s_type,
- (char *)PFK_UFS)))
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
entry->loaded_ref_cnt++;
-
+ }
}
break;
case (ACTIVE_ICE_PRELOAD):
@@ -561,9 +565,12 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
case (ACTIVE_ICE_LOADED):
kc_update_timestamp(entry);
- if (async && (!strcmp(s_type,
- (char *)PFK_UFS)))
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
entry->loaded_ref_cnt++;
+ }
break;
case(SCM_ERROR):
ret = entry->scm_error;
@@ -621,36 +628,24 @@ void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
return;
}
- if (!strcmp(s_type, (char *)PFK_UFS)) {
- ref_cnt = --entry->loaded_ref_cnt;
+ ref_cnt = --entry->loaded_ref_cnt;
- if (ref_cnt < 0)
- pr_err("internal error, ref count should never be negative\n");
+ if (ref_cnt < 0)
+ pr_err("internal error, ref count should never be negative\n");
- if (!ref_cnt) {
- entry->state = INACTIVE;
- /*
- * wake-up invalidation if it's waiting
- * for the entry to be released
- */
- if (entry->thread_pending) {
- tmp_pending = entry->thread_pending;
- entry->thread_pending = NULL;
-
- kc_spin_unlock();
- wake_up_process(tmp_pending);
- return;
- }
- }
- } else {
+ if (!ref_cnt) {
entry->state = INACTIVE;
/*
- * wake-up invalidation if it's waiting
- * for the entry to be released
- */
+ * wake-up invalidation if it's waiting
+ * for the entry to be released
+ */
if (entry->thread_pending) {
- wake_up_process(entry->thread_pending);
+ tmp_pending = entry->thread_pending;
entry->thread_pending = NULL;
+
+ kc_spin_unlock();
+ wake_up_process(tmp_pending);
+ return;
}
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 5ab9d1e3e2b8..d0221769ba52 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -740,6 +740,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
sbsec->flags |= SE_SBPROC | SE_SBGENFS;
if (!strcmp(sb->s_type->name, "debugfs") ||
+ !strcmp(sb->s_type->name, "tracefs") ||
!strcmp(sb->s_type->name, "sysfs") ||
!strcmp(sb->s_type->name, "pstore"))
sbsec->flags |= SE_SBGENFS;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 373fcad840ea..776dffa88aee 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -294,6 +294,8 @@ struct hda_codec {
#define list_for_each_codec(c, bus) \
list_for_each_entry(c, &(bus)->core.codec_list, core.list)
+#define list_for_each_codec_safe(c, n, bus) \
+ list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
/* snd_hda_codec_read/write optional flags */
#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 5baf8b56b6e7..9c6e10fb479f 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1128,8 +1128,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
- struct hda_codec *codec;
- list_for_each_codec(codec, &chip->bus) {
+ struct hda_codec *codec, *next;
+
+ /* use _safe version here since snd_hda_codec_configure() deregisters
+ * the device upon error and deletes itself from the bus list.
+ */
+ list_for_each_codec_safe(codec, next, &chip->bus) {
snd_hda_codec_configure(codec);
}
return 0;
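
list_for_each_codec_safe() mirrors list_for_each_entry_safe(): the successor is saved before the loop body runs, so the body may remove and free the current entry, which snd_hda_codec_configure() can do on error. A standalone sketch of the same idea on a plain singly linked list (types are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct codec {
    int addr;
    struct codec *next;
};

int main(void)
{
    struct codec *head = NULL, *c, *next;

    for (int i = 0; i < 3; i++) {
        c = malloc(sizeof(*c));
        c->addr = i;
        c->next = head;
        head = c;
    }

    /*
     * Safe iteration: save the successor before the body runs, so the
     * body may free the node it is visiting.  The whole list is torn
     * down here, mirroring a teardown path.
     */
    for (c = head; c; c = next) {
        next = c->next;
        printf("configuring codec %d\n", c->addr);
        free(c);            /* would crash a non-safe loop */
    }
    return 0;
}
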
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index dc2fa576d60d..689df78f640a 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3190,6 +3190,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
spec->input_paths[i][nums]);
spec->input_paths[i][nums] =
spec->input_paths[i][n];
+ spec->input_paths[i][n] = 0;
}
}
nums++;
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index b91d13c8e010..58df020811f1 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -1039,7 +1039,6 @@ static int msm_sdw_swrm_read(void *handle, int reg)
__func__, reg);
sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
-
/*
* Add sleep as SWR slave access read takes time.
* Allow for RD_DONE to complete for previous register if any.
@@ -1054,6 +1053,8 @@ static int msm_sdw_swrm_read(void *handle, int reg)
dev_err(msm_sdw->dev, "%s: RD Addr Failure\n", __func__);
goto err;
}
+ /* Add sleep for SWR register read value to get updated. */
+ usleep_range(100, 105);
/* Check for RD value */
ret = regmap_bulk_read(msm_sdw->regmap, sdw_rd_data_base,
(u8 *)&val, 4);
@@ -1079,12 +1080,12 @@ static int msm_sdw_bulk_write(struct msm_sdw_priv *msm_sdw,
sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
- /*
- * Add sleep as SWR slave write takes time.
- * Allow for any previous pending write to complete.
- */
- usleep_range(50, 55);
for (i = 0; i < len; i += 2) {
+ /*
+ * Add sleep as SWR slave write takes time.
+ * Allow for any previous pending write to complete.
+ */
+ usleep_range(100, 105);
/* First Write the Data to register */
ret = regmap_bulk_write(msm_sdw->regmap,
sdw_wr_data_base, bulk_reg[i].buf, 4);
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 3d131ed463c9..997a623033fb 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -668,7 +668,8 @@ static void wcd_mbhc_report_plug(struct wcd_mbhc *mbhc, int insertion,
jack_type == SND_JACK_LINEOUT) &&
(mbhc->hph_status && mbhc->hph_status != jack_type)) {
- if (mbhc->micbias_enable) {
+ if (mbhc->micbias_enable &&
+ mbhc->hph_status == SND_JACK_HEADSET) {
if (mbhc->mbhc_cb->mbhc_micbias_control)
mbhc->mbhc_cb->mbhc_micbias_control(
codec, MIC_BIAS_2,
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 2ab787f57b31..10883b0939d6 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -12127,8 +12127,10 @@ static int tasha_dig_core_power_collapse(struct tasha_priv *tasha,
goto unlock_mutex;
if (tasha->power_active_ref < 0) {
- dev_dbg(tasha->dev, "%s: power_active_ref is negative\n",
+ dev_info(tasha->dev,
+ "%s: power_active_ref is negative, resetting it\n",
__func__);
+ tasha->power_active_ref = 0;
goto unlock_mutex;
}
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index fe975a7dbe4e..058f6c5fa676 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -1365,6 +1365,7 @@ static int wsa881x_swr_reset(struct swr_device *pdev)
/* Retry after 1 msec delay */
usleep_range(1000, 1100);
}
+ pdev->dev_num = devnum;
regcache_mark_dirty(wsa881x->regmap);
regcache_sync(wsa881x->regmap);
return 0;
diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c
index 1d0fc93eb77f..b1dff8764618 100644
--- a/sound/soc/msm/apq8096-auto.c
+++ b/sound/soc/msm/apq8096-auto.c
@@ -3044,21 +3044,22 @@ static struct snd_soc_dai_link apq8096_common_dai_links[] = {
.codec_name = "snd-soc-dummy",
},
{
- .name = "SLIMBUS_4 Hostless",
- .stream_name = "SLIMBUS_4 Hostless",
- .cpu_dai_name = "SLIMBUS4_HOSTLESS",
- .platform_name = "msm-pcm-hostless",
+ .name = "MSM8996 HFP RX",
+ .stream_name = "MultiMedia21",
+ .cpu_dai_name = "MultiMedia21",
+ .platform_name = "msm-pcm-loopback",
.dynamic = 1,
.dpcm_playback = 1,
.dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
- .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_suspend = 1,
- /* this dailink has playback support */
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ /* this dailink has playback support */
.ignore_pmdown_time = 1,
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA21,
},
{
.name = "VoLTE",
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index a0ff2e2c64c8..5facafdc7729 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -2654,6 +2654,154 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.name = "MultiMedia20",
.probe = fe_dai_probe,
},
+ {
+ .playback = {
+ .stream_name = "MultiMedia21 Playback",
+ .aif_name = "MM_DL21",
+ .rates = (SNDRV_PCM_RATE_8000_384000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 384000,
+ },
+ .capture = {
+ .stream_name = "MultiMedia21 Capture",
+ .aif_name = "MM_UL21",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .name = "MultiMedia21",
+ .probe = fe_dai_probe,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia22 Playback",
+ .aif_name = "MM_DL22",
+ .rates = (SNDRV_PCM_RATE_8000_384000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 384000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .name = "MultiMedia22",
+ .probe = fe_dai_probe,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia23 Playback",
+ .aif_name = "MM_DL23",
+ .rates = (SNDRV_PCM_RATE_8000_384000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 384000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .name = "MultiMedia23",
+ .probe = fe_dai_probe,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia24 Playback",
+ .aif_name = "MM_DL24",
+ .rates = (SNDRV_PCM_RATE_8000_384000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 384000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .name = "MultiMedia24",
+ .probe = fe_dai_probe,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia25 Playback",
+ .aif_name = "MM_DL25",
+ .rates = (SNDRV_PCM_RATE_8000_384000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 384000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .name = "MultiMedia25",
+ .probe = fe_dai_probe,
+ },
+ {
+ .playback = {
+ .stream_name = "MultiMedia26 Playback",
+ .aif_name = "MM_DL26",
+ .rates = (SNDRV_PCM_RATE_8000_384000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 384000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .compress_new = snd_soc_new_compress,
+ .name = "MultiMedia26",
+ .probe = fe_dai_probe,
+ },
+ {
+ .capture = {
+ .stream_name = "MultiMedia27 Capture",
+ .aif_name = "MM_UL27",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .compress_new = snd_soc_new_compress,
+ .name = "MultiMedia27",
+ .probe = fe_dai_probe,
+ },
};
static int msm_fe_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index c287e31844a8..b8bd32e046a3 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -397,7 +397,9 @@ static struct dev_config aux_pcm_tx_cfg[] = {
};
static int msm_vi_feed_tx_ch = 2;
-static const char *const slim_rx_ch_text[] = {"One", "Two"};
+static const char *const slim_rx_ch_text[] = {"One", "Two", "Three", "Four",
+ "Five", "Six", "Seven",
+ "Eight"};
static const char *const slim_tx_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven",
"Eight"};
@@ -555,6 +557,13 @@ static struct snd_soc_dapm_route wcd_audio_paths[] = {
{"MIC BIAS4", NULL, "MCLK"},
};
+static u32 mi2s_ebit_clk[MI2S_MAX] = {
+ Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT,
+};
+
static struct afe_clk_set mi2s_clk[MI2S_MAX] = {
{
AFE_API_VERSION_I2S_CONFIG,
@@ -4506,6 +4515,11 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
*/
mutex_lock(&mi2s_intf_conf[index].lock);
if (++mi2s_intf_conf[index].ref_cnt == 1) {
+ /* Check if msm needs to provide the clock to the interface */
+ if (!mi2s_intf_conf[index].msm_is_mi2s_master) {
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ mi2s_clk[index].clk_id = mi2s_ebit_clk[index];
+ }
ret = msm_mi2s_set_sclk(substream, true);
if (IS_ERR_VALUE(ret)) {
dev_err(rtd->card->dev,
@@ -4525,9 +4539,6 @@ static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
ret = -EINVAL;
goto clk_off;
}
- /* Check if msm needs to provide the clock to the interface */
- if (!mi2s_intf_conf[index].msm_is_mi2s_master)
- fmt = SND_SOC_DAIFMT_CBM_CFM;
ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
if (IS_ERR_VALUE(ret)) {
pr_err("%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
@@ -5499,7 +5510,7 @@ static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
{
.name = MSM_DAILINK_NAME(Transcode Loopback Playback),
.stream_name = "Transcode Loopback Playback",
- .cpu_dai_name = "MultiMedia14",
+ .cpu_dai_name = "MultiMedia26",
.platform_name = "msm-transcode-loopback",
.dynamic = 1,
.dpcm_playback = 1,
@@ -5510,12 +5521,12 @@ static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dailink has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA14,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA26,
},
{
.name = MSM_DAILINK_NAME(Transcode Loopback Capture),
.stream_name = "Transcode Loopback Capture",
- .cpu_dai_name = "MultiMedia18",
+ .cpu_dai_name = "MultiMedia27",
.platform_name = "msm-transcode-loopback",
.dynamic = 1,
.dpcm_capture = 1,
@@ -5525,7 +5536,92 @@ static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
.codec_name = "snd-soc-dummy",
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA18,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA27,
+ },
+ {
+ .name = "MultiMedia21",
+ .stream_name = "MultiMedia21",
+ .cpu_dai_name = "MultiMedia21",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dailink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA21,
+ },
+ {
+ .name = "MultiMedia22",
+ .stream_name = "MultiMedia22",
+ .cpu_dai_name = "MultiMedia22",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dailink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA22,
+ },
+ {
+ .name = "MultiMedia23",
+ .stream_name = "MultiMedia23",
+ .cpu_dai_name = "MultiMedia23",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dailink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA23,
+ },
+ {
+ .name = "MultiMedia24",
+ .stream_name = "MultiMedia24",
+ .cpu_dai_name = "MultiMedia24",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dailink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA24,
+ },
+ {
+ .name = "MultiMedia25",
+ .stream_name = "MultiMedia25",
+ .cpu_dai_name = "MultiMedia25",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .async_ops = ASYNC_DPCM_SND_SOC_PREPARE,
+ .dpcm_playback = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ /* this dailink has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA25,
},
};
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index fa8bdddacef2..3610901addea 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -5147,6 +5147,27 @@ static int msm_dai_tdm_q6_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s: Clk Rate from DT file %d\n",
__func__, tdm_clk_set.clk_freq_in_hz);
+ /* initialize static tdm clk attribute to default value */
+ tdm_clk_set.clk_attri = Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO;
+
+ /* extract tdm clk attribute into static */
+ if (of_find_property(pdev->dev.of_node,
+ "qcom,msm-cpudai-tdm-clk-attribute", NULL)) {
+ rc = of_property_read_u16(pdev->dev.of_node,
+ "qcom,msm-cpudai-tdm-clk-attribute",
+ &tdm_clk_set.clk_attri);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: clk attribute from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-clk-attribute");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: clk attribute from DT file %d\n",
+ __func__, tdm_clk_set.clk_attri);
+ } else {
+ dev_dbg(&pdev->dev, "%s: No optional clk attribute found\n",
+ __func__);
+ }
+
/* extract tdm clk src master/slave info into static */
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,msm-cpudai-tdm-clk-internal",
@@ -6226,6 +6247,41 @@ static int msm_dai_q6_tdm_set_tdm_slot(struct snd_soc_dai *dai,
return rc;
}
+static int msm_dai_q6_tdm_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct msm_dai_q6_tdm_dai_data *dai_data =
+ dev_get_drvdata(dai->dev);
+
+ switch (dai->id) {
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+ case AFE_PORT_ID_PRIMARY_TDM_TX:
+ case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+ case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+ case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+ case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+ case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+ case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+ case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+ dai_data->clk_set.clk_freq_in_hz = freq;
+ break;
+ default:
+ return 0;
+ }
+
+ dev_dbg(dai->dev, "%s: dai id = 0x%x group clk_freq %d\n",
+ __func__, dai->id, freq);
+ return 0;
+}
+
+
static int msm_dai_q6_tdm_set_channel_map(struct snd_soc_dai *dai,
unsigned int tx_num, unsigned int *tx_slot,
unsigned int rx_num, unsigned int *rx_slot)
@@ -6653,6 +6709,7 @@ static struct snd_soc_dai_ops msm_dai_q6_tdm_ops = {
.hw_params = msm_dai_q6_tdm_hw_params,
.set_tdm_slot = msm_dai_q6_tdm_set_tdm_slot,
.set_channel_map = msm_dai_q6_tdm_set_channel_map,
+ .set_sysclk = msm_dai_q6_tdm_set_sysclk,
.shutdown = msm_dai_q6_tdm_shutdown,
};
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index 6b026bafa276..276270258771 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -30,9 +30,12 @@
#include <sound/control.h>
#include <sound/q6audio-v2.h>
#include <sound/timer.h>
+#include <sound/hwdep.h>
+
#include <asm/dma.h>
#include <sound/tlv.h>
#include <sound/pcm_params.h>
+#include <sound/devdep_params.h>
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
@@ -421,6 +424,42 @@ static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
+
+static int msm_pcm_mmap_fd(struct snd_pcm_substream *substream,
+ struct snd_pcm_mmap_fd *mmap_fd)
+{
+ struct msm_audio *prtd;
+ struct audio_port_data *apd;
+ struct audio_buffer *ab;
+ int dir = -1;
+
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ return -EFAULT;
+ }
+
+ prtd = substream->runtime->private_data;
+ if (!prtd || !prtd->audio_client || !prtd->mmap_flag) {
+ pr_err("%s no audio client or not an mmap session\n", __func__);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dir = IN;
+ else
+ dir = OUT;
+
+ apd = prtd->audio_client->port;
+ ab = &(apd[dir].buf[0]);
+ mmap_fd->fd = ion_share_dma_buf_fd(ab->client, ab->handle);
+ if (mmap_fd->fd >= 0) {
+ mmap_fd->dir = dir;
+ mmap_fd->actual_size = ab->actual_size;
+ mmap_fd->size = ab->size;
+ }
+ return mmap_fd->fd < 0 ? -EFAULT : 0;
+}
+
static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
@@ -445,6 +484,15 @@ static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
+#ifdef CONFIG_COMPAT
+static int msm_pcm_compat_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ /* we only handle RESET which is common for both modes */
+ return msm_pcm_ioctl(substream, cmd, arg);
+}
+#endif
+
static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -994,6 +1042,101 @@ static int msm_pcm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
return 0;
}
+static int msm_pcm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct snd_pcm *pcm = hw->private_data;
+ struct snd_pcm_mmap_fd __user *_mmap_fd = NULL;
+ struct snd_pcm_mmap_fd mmap_fd;
+ struct snd_pcm_substream *substream = NULL;
+ int32_t dir = -1;
+
+ switch (cmd) {
+ case SNDRV_PCM_IOCTL_MMAP_DATA_FD:
+ _mmap_fd = (struct snd_pcm_mmap_fd __user *)arg;
+ if (get_user(dir, (int32_t __user *)&(_mmap_fd->dir))) {
+ pr_err("%s: error copying mmap_fd from user\n",
+ __func__);
+ ret = -EFAULT;
+ break;
+ }
+ if (dir != OUT && dir != IN) {
+ pr_err("%s invalid stream dir\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+ substream = pcm->streams[dir].substream;
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ ret = -ENODEV;
+ break;
+ }
+ pr_debug("%s : %s MMAP Data fd\n", __func__,
+ dir == 0 ? "P" : "C");
+ if (msm_pcm_mmap_fd(substream, &mmap_fd) < 0) {
+ pr_err("%s: error getting fd\n",
+ __func__);
+ ret = -EFAULT;
+ break;
+ }
+ if (put_user(mmap_fd.fd, &_mmap_fd->fd) ||
+ put_user(mmap_fd.size, &_mmap_fd->size) ||
+ put_user(mmap_fd.actual_size, &_mmap_fd->actual_size)) {
+ pr_err("%s: error copying fd\n", __func__);
+ return -EFAULT;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ /* we only support mmap fd. Handling is common in both modes */
+ return msm_pcm_hwdep_ioctl(hw, file, cmd, arg);
+}
+#else
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return -EINVAL;
+}
+#endif
+
+static int msm_pcm_add_hwdep_dev(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_hwdep *hwdep;
+ int rc;
+ char id[] = "NOIRQ_NN";
+
+ snprintf(id, sizeof(id), "NOIRQ_%d", runtime->pcm->device);
+ pr_debug("%s: pcm dev %d\n", __func__, runtime->pcm->device);
+ rc = snd_hwdep_new(runtime->card->snd_card,
+ &id[0],
+ HWDEP_FE_BASE + runtime->pcm->device,
+ &hwdep);
+ if (!hwdep || rc < 0) {
+ pr_err("%s: hwdep intf failed to create %s - hwdep\n", __func__,
+ id);
+ return rc;
+ }
+
+ hwdep->iface = SNDRV_HWDEP_IFACE_AUDIO_BE; /* for lack of a FE iface */
+ hwdep->private_data = runtime->pcm; /* of type struct snd_pcm */
+ hwdep->ops.ioctl = msm_pcm_hwdep_ioctl;
+ hwdep->ops.ioctl_compat = msm_pcm_hwdep_compat_ioctl;
+ return 0;
+}
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
@@ -1027,7 +1170,9 @@ static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add app type controls failed %d\n",
__func__, ret);
}
-
+ ret = msm_pcm_add_hwdep_dev(rtd);
+ if (ret)
+ pr_err("%s: Could not add hw dep node\n", __func__);
pcm->nonatomic = true;
exit:
return ret;
@@ -1040,6 +1185,9 @@ static struct snd_pcm_ops msm_pcm_ops = {
.copy = msm_pcm_copy,
.hw_params = msm_pcm_hw_params,
.ioctl = msm_pcm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = msm_pcm_compat_ioctl,
+#endif
.trigger = msm_pcm_trigger,
.pointer = msm_pcm_pointer,
.mmap = msm_pcm_mmap,
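
msm_pcm_add_hwdep_dev() above sizes its id buffer with the template string "NOIRQ_NN" and then snprintf()s the real device number into it, so the buffer is exactly as wide as the largest expected name. A standalone sketch of that template-sized-buffer idiom (names are illustrative):

#include <stdio.h>

int main(void)
{
    /* The template reserves room for a two-digit device number + NUL. */
    char id[] = "NOIRQ_NN";

    for (int dev = 0; dev < 3; dev++) {
        snprintf(id, sizeof(id), "NOIRQ_%d", dev);
        printf("registering hwdep node %s\n", id);
    }
    return 0;
}
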
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 46a3324d2d6b..de769e8b806c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -1603,6 +1603,262 @@ done:
return ret;
}
+static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ int len = 0;
+ int i = 0;
+ struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream;
+ struct msm_audio *prtd;
+ struct asm_stream_pan_ctrl_params pan_param;
+
+ if (!usr_info) {
+ pr_err("%s: usr_info is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ prtd = substream->runtime->private_data;
+ if (!prtd) {
+ ret = -EINVAL;
+ goto done;
+ }
+ pan_param.num_output_channels =
+ ucontrol->value.integer.value[len++];
+ if (pan_param.num_output_channels >
+ PCM_FORMAT_MAX_NUM_CHANNEL) {
+ ret = -EINVAL;
+ goto done;
+ }
+ pan_param.num_input_channels =
+ ucontrol->value.integer.value[len++];
+ if (pan_param.num_input_channels >
+ PCM_FORMAT_MAX_NUM_CHANNEL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (ucontrol->value.integer.value[len++]) {
+ for (i = 0; i < pan_param.num_output_channels; i++) {
+ pan_param.output_channel_map[i] =
+ ucontrol->value.integer.value[len++];
+ }
+ }
+ if (ucontrol->value.integer.value[len++]) {
+ for (i = 0; i < pan_param.num_input_channels; i++) {
+ pan_param.input_channel_map[i] =
+ ucontrol->value.integer.value[len++];
+ }
+ }
+ if (ucontrol->value.integer.value[len++]) {
+ for (i = 0; i < pan_param.num_output_channels *
+ pan_param.num_input_channels; i++) {
+ pan_param.gain[i] =
+ !(ucontrol->value.integer.value[len++] > 0) ?
+ 0 : 2 << 13;
+ }
+ }
+
+ ret = q6asm_set_mfc_panning_params(prtd->audio_client,
+ &pan_param);
+ len -= pan_param.num_output_channels *
+ pan_param.num_input_channels;
+ for (i = 0; i < pan_param.num_output_channels *
+ pan_param.num_input_channels; i++) {
+ pan_param.gain[i] =
+ ucontrol->value.integer.value[len++];
+ }
+ ret = q6asm_set_vol_ctrl_gain_pair(prtd->audio_client,
+ &pan_param);
+
+done:
+ return ret;
+}
+
+static int msm_pcm_playback_pan_scale_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return 0;
+}
+
+static int msm_add_stream_pan_scale_controls(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm;
+ struct snd_pcm_usr *pan_ctl_info;
+ struct snd_kcontrol *kctl;
+ const char *playback_mixer_ctl_name = "Audio Stream";
+ const char *deviceNo = "NN";
+ const char *suffix = "Pan Scale Control";
+ int ctl_len, ret = 0;
+
+ if (!rtd) {
+ pr_err("%s: rtd is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ pcm = rtd->pcm;
+ ctl_len = strlen(playback_mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
+ strlen(suffix) + 1;
+
+ ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ NULL, 1, ctl_len, rtd->dai_link->be_id,
+ &pan_ctl_info);
+
+ if (ret < 0) {
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, suffix, ret);
+ goto done;
+ }
+ kctl = pan_ctl_info->kctl;
+ snprintf(kctl->id.name, ctl_len, "%s %d %s", playback_mixer_ctl_name,
+ rtd->pcm->device, suffix);
+ kctl->put = msm_pcm_playback_pan_scale_ctl_put;
+ kctl->get = msm_pcm_playback_pan_scale_ctl_get;
+ pr_debug("%s: Registering new mixer ctl = %s\n", __func__,
+ kctl->id.name);
+done:
+ return ret;
+
+}
+
+static int msm_pcm_playback_dnmix_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return 0;
+}
+
+static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ int len = 0;
+ int i = 0;
+ struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_pcm_substream *substream;
+ struct msm_audio *prtd;
+ struct asm_stream_pan_ctrl_params dnmix_param;
+
+ int be_id = ucontrol->value.integer.value[len];
+ int stream_id = 0;
+
+ if (!usr_info) {
+ pr_err("%s usr_info is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ prtd = substream->runtime->private_data;
+ if (!prtd) {
+ ret = -EINVAL;
+ goto done;
+ }
+ stream_id = prtd->audio_client->session;
+ dnmix_param.num_output_channels =
+ ucontrol->value.integer.value[len++];
+ if (dnmix_param.num_output_channels >
+ PCM_FORMAT_MAX_NUM_CHANNEL) {
+ ret = -EINVAL;
+ goto done;
+ }
+ dnmix_param.num_input_channels =
+ ucontrol->value.integer.value[len++];
+ if (dnmix_param.num_input_channels >
+ PCM_FORMAT_MAX_NUM_CHANNEL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (ucontrol->value.integer.value[len++]) {
+ for (i = 0; i < dnmix_param.num_output_channels; i++) {
+ dnmix_param.output_channel_map[i] =
+ ucontrol->value.integer.value[len++];
+ }
+ }
+ if (ucontrol->value.integer.value[len++]) {
+ for (i = 0; i < dnmix_param.num_input_channels; i++) {
+ dnmix_param.input_channel_map[i] =
+ ucontrol->value.integer.value[len++];
+ }
+ }
+ if (ucontrol->value.integer.value[len++]) {
+ for (i = 0; i < dnmix_param.num_output_channels *
+ dnmix_param.num_input_channels; i++) {
+ dnmix_param.gain[i] =
+ ucontrol->value.integer.value[len++];
+ }
+ }
+ msm_routing_set_downmix_control_data(be_id,
+ stream_id,
+ &dnmix_param);
+
+done:
+ return ret;
+}
+
+static int msm_add_device_down_mix_controls(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm;
+ struct snd_pcm_usr *usr_info;
+ struct snd_kcontrol *kctl;
+ const char *playback_mixer_ctl_name = "Audio Device";
+ const char *deviceNo = "NN";
+ const char *suffix = "Downmix Control";
+ int ctl_len, ret = 0;
+
+ if (!rtd) {
+ pr_err("%s: rtd is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ pcm = rtd->pcm;
+ ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+ strlen(deviceNo) + 1 + strlen(suffix) + 1;
+ ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ NULL, 1, ctl_len, rtd->dai_link->be_id,
+ &usr_info);
+ if (ret < 0) {
+ pr_err("%s: downmix control add failed: %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ kctl = usr_info->kctl;
+ snprintf(kctl->id.name, ctl_len, "%s %d %s",
+ playback_mixer_ctl_name, rtd->pcm->device, suffix);
+ kctl->put = msm_pcm_playback_dnmix_ctl_put;
+ kctl->get = msm_pcm_playback_dnmix_ctl_get;
+ pr_debug("%s: downmix control name = %s\n",
+ __func__, playback_mixer_ctl_name);
+done:
+ return ret;
+}
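(Editorial note, not part of the patch: both registration helpers above size the control name using a two-character "NN" placeholder for the PCM device number and then snprintf() into that budget. The stand-alone check below is purely illustrative; it mimics that sizing to show the resulting names and that a device number wider than two digits would be truncated by this scheme.)

```c
/* Illustrative only: mimics the ctl_len sizing and snprintf() naming used by
 * msm_add_stream_pan_scale_controls() / msm_add_device_down_mix_controls().
 */
#include <stdio.h>
#include <string.h>

static void show(const char *prefix, const char *suffix, int device)
{
	/* strlen(prefix) + ' ' + "NN" + ' ' + strlen(suffix) + '\0' */
	size_t ctl_len = strlen(prefix) + 1 + strlen("NN") + 1 +
			 strlen(suffix) + 1;
	char name[64];

	snprintf(name, ctl_len, "%s %d %s", prefix, device, suffix);
	printf("ctl_len=%zu  name=\"%s\"\n", ctl_len, name);
}

int main(void)
{
	show("Audio Stream", "Pan Scale Control", 5);	/* fits the budget   */
	show("Audio Device", "Downmix Control", 7);	/* fits the budget   */
	show("Audio Device", "Downmix Control", 123);	/* 3-digit device is truncated */
	return 0;
}
```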
+
static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1719,6 +1975,14 @@ static int msm_pcm_add_controls(struct snd_soc_pcm_runtime *rtd)
if (ret)
pr_err("%s: pcm add app type controls failed:%d\n",
__func__, ret);
+ ret = msm_add_stream_pan_scale_controls(rtd);
+ if (ret)
+ pr_err("%s: pcm add pan scale controls failed:%d\n",
+ __func__, ret);
+ ret = msm_add_device_down_mix_controls(rtd);
+ if (ret)
+ pr_err("%s: pcm add dnmix controls failed:%d\n",
+ __func__, ret);
return ret;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index d20434bf8835..38937612dcce 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -35,6 +35,8 @@
#include <sound/audio_cal_utils.h>
#include <sound/audio_effects.h>
#include <sound/hwdep.h>
+#include <sound/q6adm-v2.h>
+#include <sound/apr_audio-v2.h>
#include "msm-pcm-routing-v2.h"
#include "msm-pcm-routing-devdep.h"
@@ -118,17 +120,13 @@ static const char * const lsm_port_text[] = {
};
struct msm_pcm_route_bdai_pp_params {
- u16 port_id; /* AFE port ID */
unsigned long pp_params_config;
bool mute_on;
int latency;
};
static struct msm_pcm_route_bdai_pp_params
- msm_bedais_pp_params[MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX] = {
- {HDMI_RX, 0, 0, 0},
- {DISPLAY_PORT_RX, 0, 0, 0},
-};
+ msm_bedais_pp_params[MSM_BACKEND_DAI_MAX];
/*
* The be_dai_name_table is passed to HAL so that it can specify the
@@ -142,6 +140,7 @@ struct msm_pcm_route_bdai_name {
};
static struct msm_pcm_route_bdai_name be_dai_name_table[MSM_BACKEND_DAI_MAX];
+static bool msm_pcm_routing_test_pp_param(int be_idx, long param_bit);
static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
int fe_id);
@@ -606,6 +605,27 @@ static struct msm_pcm_routing_fdai_data
/* MULTIMEDIA20 */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA21 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA22 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA23 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA24 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA25 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA26 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA27 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
/* CS_VOICE */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
@@ -953,7 +973,7 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
uint32_t passthr_mode)
{
int i, port_type, j, num_copps = 0;
- struct route_payload payload;
+ struct route_payload payload = { {0} };
port_type = ((path_type == ADM_PATH_PLAYBACK ||
path_type == ADM_PATH_COMPRESSED_RX) ?
@@ -983,6 +1003,11 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
fe_dai_app_type_cfg
[fedai_id][sess_type][i]
.sample_rate;
+ if (msm_pcm_routing_test_pp_param(i,
+ ADM_PP_PARAM_LIMITER_BIT))
+ set_bit(ADM_STATUS_LIMITER,
+ &payload.route_status
+ [num_copps]);
num_copps++;
}
}
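(Editorial note, not part of the patch: the three routing hunks touched here — matrix build, compressed stream registration, PCM stream registration — all add the same pattern: when the limiter post-processing bit is set for a back end, the copp entry about to be filled in payload.route_status gets ADM_STATUS_LIMITER set before num_copps advances. The sketch below is a minimal stand-in for that per-copp flag bookkeeping; it uses a plain bitmask instead of the kernel set_bit()/test_bit() helpers, and every name other than those visible in the diff is hypothetical.)

```c
/* Minimal sketch of the per-copp status flag pattern used above;
 * plain bitmasks stand in for the kernel's set_bit()/test_bit().
 */
#include <stdio.h>

#define LIMITER_BIT	0	/* assumed bit position for the limiter status */
#define MAX_COPPS	8

/* Stand-in for msm_pcm_routing_test_pp_param(be_idx, ADM_PP_PARAM_LIMITER_BIT). */
static const int be_has_limiter[MAX_COPPS] = { 1, 0, 1, 0, 0, 0, 0, 0 };

int main(void)
{
	unsigned long route_status[MAX_COPPS] = { 0 };
	int be_idx, num_copps = 0;

	for (be_idx = 0; be_idx < MAX_COPPS; be_idx++) {
		/* ... copp/port matching and app type setup elided ... */
		if (be_has_limiter[be_idx])
			route_status[num_copps] |= 1UL << LIMITER_BIT;
		num_copps++;
	}

	for (be_idx = 0; be_idx < num_copps; be_idx++)
		printf("copp %d: limiter %s\n", be_idx,
		       (route_status[be_idx] & (1UL << LIMITER_BIT)) ?
		       "on" : "off");
	return 0;
}
```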
@@ -1088,7 +1113,7 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
port_type = MSM_AFE_PORT_TYPE_RX;
} else if (stream_type == SNDRV_PCM_STREAM_CAPTURE) {
session_type = SESSION_TYPE_TX;
- if (passthr_mode != LEGACY_PCM)
+ if ((passthr_mode != LEGACY_PCM) && (passthr_mode != LISTEN))
path_type = ADM_PATH_COMPRESSED_TX;
else
path_type = ADM_PATH_LIVE_REC;
@@ -1204,6 +1229,11 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
fe_dai_app_type_cfg
[fe_id][session_type][i]
.sample_rate;
+ if (msm_pcm_routing_test_pp_param(i,
+ ADM_PP_PARAM_LIMITER_BIT))
+ set_bit(ADM_STATUS_LIMITER,
+ &payload.route_status
+ [num_copps]);
num_copps++;
}
}
@@ -1434,6 +1464,11 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
fe_dai_app_type_cfg
[fedai_id][session_type]
[i].sample_rate;
+ if (msm_pcm_routing_test_pp_param(i,
+ ADM_PP_PARAM_LIMITER_BIT))
+ set_bit(ADM_STATUS_LIMITER,
+ &payload.route_status
+ [num_copps]);
num_copps++;
}
}
@@ -3803,6 +3838,9 @@ static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
@@ -3863,6 +3901,9 @@ static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
@@ -3923,7 +3964,9 @@ static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SPDIF_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
-
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SPDIF_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
@@ -3975,6 +4018,9 @@ static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_2_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
@@ -4035,6 +4081,9 @@ static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SLIMBUS_5_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
@@ -4095,6 +4144,24 @@ static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
@@ -4155,6 +4222,9 @@ static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
@@ -4215,6 +4285,9 @@ static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quinary_mi2s_rx_mixer_controls[] = {
@@ -4275,6 +4348,9 @@ static const struct snd_kcontrol_new quinary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
@@ -4329,6 +4405,9 @@ static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new secondary_mi2s_rx2_mixer_controls[] = {
@@ -4395,6 +4474,9 @@ static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
@@ -4455,6 +4537,9 @@ static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_PRI_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int0_mi2s_rx_mixer_controls[] = {
@@ -4506,6 +4591,9 @@ static const struct snd_kcontrol_new int0_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT0_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT0_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int4_mi2s_rx_mixer_controls[] = {
@@ -4557,6 +4645,9 @@ static const struct snd_kcontrol_new int4_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT4_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT4_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
@@ -4617,6 +4708,24 @@ static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new display_port_mixer_controls[] = {
@@ -4668,6 +4777,9 @@ static const struct snd_kcontrol_new display_port_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
/* incall music delivery mixer */
@@ -4765,6 +4877,24 @@ static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_6_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_7_rx_mixer_controls[] = {
@@ -4816,6 +4946,24 @@ static const struct snd_kcontrol_new slimbus_7_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_7_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new usb_audio_rx_mixer_controls[] = {
@@ -4867,6 +5015,9 @@ static const struct snd_kcontrol_new usb_audio_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_USB_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_USB_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
@@ -4927,6 +5078,9 @@ static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_INT_BT_SCO_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int_bt_a2dp_rx_mixer_controls[] = {
@@ -4978,6 +5132,9 @@ static const struct snd_kcontrol_new int_bt_a2dp_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_BT_A2DP_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int_fm_rx_mixer_controls[] = {
@@ -5038,6 +5195,9 @@ static const struct snd_kcontrol_new int_fm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_INT_FM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_FM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
@@ -5098,6 +5258,9 @@ static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
@@ -5158,6 +5321,12 @@ static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
@@ -5218,6 +5387,12 @@ static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tert_auxpcm_rx_mixer_controls[] = {
@@ -5269,6 +5444,9 @@ static const struct snd_kcontrol_new tert_auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_auxpcm_rx_mixer_controls[] = {
@@ -5320,6 +5498,9 @@ static const struct snd_kcontrol_new quat_auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
@@ -5371,6 +5552,12 @@ static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
@@ -5422,6 +5609,12 @@ static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
@@ -5473,6 +5666,12 @@ static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
@@ -5524,6 +5723,12 @@ static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_PRI_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new pri_tdm_tx_0_mixer_controls[] = {
@@ -5626,6 +5831,12 @@ static const struct snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
@@ -5677,6 +5888,12 @@ static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
@@ -5728,6 +5945,12 @@ static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
@@ -5779,6 +6002,12 @@ static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SEC_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_tdm_tx_0_mixer_controls[] = {
@@ -5881,6 +6110,12 @@ static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tert_tdm_tx_0_mixer_controls[] = {
@@ -5983,6 +6218,12 @@ static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = {
@@ -6034,6 +6275,12 @@ static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
@@ -6085,6 +6332,12 @@ static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
@@ -6136,6 +6389,12 @@ static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_4,
MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
@@ -6190,6 +6449,12 @@ static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_tdm_tx_0_mixer_controls[] = {
@@ -6295,6 +6560,12 @@ static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
@@ -6349,6 +6620,12 @@ static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
@@ -6403,6 +6680,12 @@ static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
@@ -7389,6 +7672,75 @@ static const struct snd_kcontrol_new mmul20_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul21_mixer_controls[] = {
+ SOC_SINGLE_EXT("AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul27_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA27, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA27, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA27, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -9910,6 +10262,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_0_port_mixer_controls[] = {
MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
msm_routing_get_port_mixer,
@@ -9977,6 +10345,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_1_port_mixer_controls[] = {
MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
msm_routing_get_port_mixer,
@@ -10044,6 +10428,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_2_port_mixer_controls[] = {
MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
msm_routing_get_port_mixer,
@@ -10111,6 +10511,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_3_port_mixer_controls[] = {
MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0,
msm_routing_get_port_mixer,
msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0,
+ msm_routing_get_port_mixer,
+ msm_routing_put_port_mixer),
SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0,
msm_routing_get_port_mixer,
@@ -11427,6 +11843,12 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_AIF_IN("MM_DL15", "MultiMedia15 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("MM_DL16", "MultiMedia16 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("MM_DL20", "MultiMedia20 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL21", "MultiMedia21 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL22", "MultiMedia22 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL23", "MultiMedia23 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL24", "MultiMedia24 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL25", "MultiMedia25 Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("MM_DL26", "MultiMedia26 Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
@@ -11441,6 +11863,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL20", "MultiMedia20 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL21", "MultiMedia21 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL27", "MultiMedia27 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VOICE2_DL", "Voice2 Playback", 0, 0, 0, 0),
@@ -12183,6 +12607,10 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
mmul19_mixer_controls, ARRAY_SIZE(mmul19_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia20 Mixer", SND_SOC_NOPM, 0, 0,
mmul20_mixer_controls, ARRAY_SIZE(mmul20_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia21 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul21_mixer_controls, ARRAY_SIZE(mmul21_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia27 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul27_mixer_controls, ARRAY_SIZE(mmul27_mixer_controls)),
SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -12532,6 +12960,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"PRI_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_I2S_RX", NULL, "PRI_RX Audio Mixer"},
{"SEC_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12550,6 +12979,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_I2S_RX", NULL, "SEC_RX Audio Mixer"},
{"SLIMBUS_0_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12568,6 +12998,12 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_0_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SLIMBUS_0_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SLIMBUS_0_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia22", "MM_DL22"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia23", "MM_DL23"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia24", "MM_DL24"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia25", "MM_DL25"},
+ {"SLIMBUS_0_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"},
{"SLIMBUS_2_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12586,6 +13022,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_2_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SLIMBUS_2_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SLIMBUS_2_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SLIMBUS_2_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SLIMBUS_2_RX", NULL, "SLIMBUS_2_RX Audio Mixer"},
{"SLIMBUS_5_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12604,6 +13041,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_5_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SLIMBUS_5_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SLIMBUS_5_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SLIMBUS_5_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SLIMBUS_5_RX", NULL, "SLIMBUS_5_RX Audio Mixer"},
{"HDMI Mixer", "MultiMedia1", "MM_DL1"},
@@ -12622,6 +13060,12 @@ static const struct snd_soc_dapm_route intercon[] = {
{"HDMI Mixer", "MultiMedia14", "MM_DL14"},
{"HDMI Mixer", "MultiMedia15", "MM_DL15"},
{"HDMI Mixer", "MultiMedia16", "MM_DL16"},
+ {"HDMI Mixer", "MultiMedia21", "MM_DL21"},
+ {"HDMI Mixer", "MultiMedia22", "MM_DL22"},
+ {"HDMI Mixer", "MultiMedia23", "MM_DL23"},
+ {"HDMI Mixer", "MultiMedia24", "MM_DL24"},
+ {"HDMI Mixer", "MultiMedia25", "MM_DL25"},
+ {"HDMI Mixer", "MultiMedia26", "MM_DL26"},
{"HDMI", NULL, "HDMI Mixer"},
{"DISPLAY_PORT Mixer", "MultiMedia1", "MM_DL1"},
@@ -12640,6 +13084,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"DISPLAY_PORT Mixer", "MultiMedia14", "MM_DL14"},
{"DISPLAY_PORT Mixer", "MultiMedia15", "MM_DL15"},
{"DISPLAY_PORT Mixer", "MultiMedia16", "MM_DL16"},
+ {"DISPLAY_PORT Mixer", "MultiMedia26", "MM_DL26"},
{"DISPLAY_PORT", NULL, "DISPLAY_PORT Mixer"},
{"SPDIF_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12658,6 +13103,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SPDIF_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SPDIF_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SPDIF_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SPDIF_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SPDIF_RX", NULL, "SPDIF_RX Audio Mixer"},
/* incall */
@@ -12693,6 +13139,12 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_6_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SLIMBUS_6_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SLIMBUS_6_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia22", "MM_DL22"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia23", "MM_DL23"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia24", "MM_DL24"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia25", "MM_DL25"},
+ {"SLIMBUS_6_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12711,6 +13163,12 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SLIMBUS_7_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia22", "MM_DL22"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia23", "MM_DL23"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia24", "MM_DL24"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia25", "MM_DL25"},
+ {"SLIMBUS_7_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SLIMBUS_7_RX", NULL, "SLIMBUS_7_RX Audio Mixer"},
{"USB_AUDIO_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12729,6 +13187,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"USB_AUDIO_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"USB_AUDIO_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"USB_AUDIO_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"USB_AUDIO_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"USB_AUDIO_RX", NULL, "USB_AUDIO_RX Audio Mixer"},
{"MultiMedia1 Mixer", "VOC_REC_UL", "INCALL_RECORD_TX"},
@@ -12777,6 +13236,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"MI2S_RX", NULL, "MI2S_RX Audio Mixer"},
{"QUAT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12794,6 +13254,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"QUAT_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"QUAT_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Audio Mixer"},
{"TERT_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12810,6 +13271,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"TERT_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Audio Mixer"},
{"SEC_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12826,6 +13288,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_MI2S_RX", NULL, "SEC_MI2S_RX Audio Mixer"},
{"SEC_MI2S_RX_SD1 Audio Mixer", "MultiMedia6", "MM_DL6"},
@@ -12849,6 +13312,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"PRI_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Audio Mixer"},
{"INT0_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12866,6 +13330,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"INT0_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"INT0_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"INT0_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"INT0_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"INT0_MI2S_RX", NULL, "INT0_MI2S_RX Audio Mixer"},
{"INT4_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12883,6 +13348,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"INT4_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"INT4_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"INT4_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"INT4_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"INT4_MI2S_RX", NULL, "INT4_MI2S_RX Audio Mixer"},
{"QUIN_MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12902,6 +13368,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUIN_MI2S_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"QUIN_MI2S_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUIN_MI2S_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"QUIN_MI2S_RX Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUIN_MI2S_RX", NULL, "QUIN_MI2S_RX Audio Mixer"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12920,6 +13387,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12938,6 +13407,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1 Audio Mixer"},
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12956,6 +13427,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2 Audio Mixer"},
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12974,6 +13447,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3 Audio Mixer"},
{"PRI_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13010,6 +13485,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13028,6 +13505,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1 Audio Mixer"},
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13046,6 +13525,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2 Audio Mixer"},
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13064,6 +13545,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3 Audio Mixer"},
{"SEC_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13100,6 +13583,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0 Audio Mixer"},
{"TERT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13136,6 +13621,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_1 Audio Mixer"},
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13154,6 +13641,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_2 Audio Mixer"},
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13172,6 +13661,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"},
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13190,6 +13681,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13209,6 +13702,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia20", "MM_DL20"},
+ {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13227,6 +13722,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13245,6 +13741,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"},
{"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13282,6 +13779,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia20", "MM_DL20"},
+ {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1 Audio Mixer"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13301,6 +13800,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia20", "MM_DL20"},
+ {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2 Audio Mixer"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13320,6 +13821,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia20", "MM_DL20"},
+ {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"},
{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Audio Mixer"},
{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
@@ -13331,6 +13834,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia27 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia1 Mixer", "QUIN_MI2S_TX", "QUIN_MI2S_TX"},
{"MultiMedia2 Mixer", "QUIN_MI2S_TX", "QUIN_MI2S_TX"},
{"MultiMedia1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -13340,6 +13844,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia1 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia2 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia1 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia27 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia3 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
{"MultiMedia5 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
@@ -13361,6 +13866,7 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia2 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
{"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
{"MultiMedia1 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia27 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia2 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
{"MultiMedia6 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia6 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -13528,6 +14034,25 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia20 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
{"MultiMedia20 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+ {"MultiMedia21 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"MultiMedia21 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"MultiMedia21 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"MultiMedia21 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+ {"MultiMedia21 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+ {"MultiMedia21 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+ {"MultiMedia21 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+ {"MultiMedia21 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+ {"MultiMedia21 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+ {"MultiMedia21 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+ {"MultiMedia21 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+ {"MultiMedia21 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+ {"MultiMedia21 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+ {"MultiMedia21 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+ {"MultiMedia21 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+ {"MultiMedia21 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+ {"MultiMedia21 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+ {"MultiMedia21 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+
{"MultiMedia1 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia2 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia4 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
@@ -13669,6 +14194,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL18", NULL, "MultiMedia18 Mixer"},
{"MM_UL19", NULL, "MultiMedia19 Mixer"},
{"MM_UL20", NULL, "MultiMedia20 Mixer"},
+ {"MM_UL21", NULL, "MultiMedia21 Mixer"},
+ {"MM_UL27", NULL, "MultiMedia27 Mixer"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -13686,6 +14213,9 @@ static const struct snd_soc_dapm_route intercon[] = {
{"AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_UL6"},
+ {"AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_UL21"},
{"AUX_PCM_RX", NULL, "AUX_PCM_RX Audio Mixer"},
{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -13704,6 +14234,9 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia14", "MM_DL14"},
{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia15", "MM_DL15"},
{"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia16", "MM_DL16"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_DL21"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia6", "MM_UL6"},
+ {"SEC_AUX_PCM_RX Audio Mixer", "MultiMedia21", "MM_UL21"},
{"SEC_AUX_PCM_RX", NULL, "SEC_AUX_PCM_RX Audio Mixer"},
{"TERT_AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -14608,6 +15141,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
{"QUAT_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"QUAT_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+ {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
{"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
{"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
{"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -14626,6 +15163,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
{"QUAT_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"QUAT_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+ {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
{"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
{"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
{"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -14644,6 +15185,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
{"QUAT_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"QUAT_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+ {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
{"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
{"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
{"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -14662,6 +15207,10 @@ static const struct snd_soc_dapm_route intercon[] = {
{"QUAT_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"},
{"QUAT_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"QUAT_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+ {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
{"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
{"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
{"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
@@ -15316,7 +15865,7 @@ done:
static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
int fe_id)
{
- int index, topo_id, be_idx;
+ int topo_id, be_idx;
unsigned long pp_config = 0;
bool mute_on;
int latency;
@@ -15340,16 +15889,6 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
return -EINVAL;
}
- for (index = 0; index < MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX; index++) {
- if (msm_bedais_pp_params[index].port_id == port_id)
- break;
- }
- if (index >= MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX) {
- pr_err("%s: Invalid backend pp params index %d\n",
- __func__, index);
- return -EINVAL;
- }
-
topo_id = adm_get_topology_for_port_copp_idx(port_id, copp_idx);
if (topo_id != COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY) {
pr_err("%s: Invalid passthrough topology 0x%x\n",
@@ -15361,11 +15900,11 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
(msm_bedais[be_idx].passthr_mode[fe_id] == LISTEN))
compr_passthr_mode = false;
- pp_config = msm_bedais_pp_params[index].pp_params_config;
+ pp_config = msm_bedais_pp_params[be_idx].pp_params_config;
if (test_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config)) {
pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
clear_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config);
- mute_on = msm_bedais_pp_params[index].mute_on;
+ mute_on = msm_bedais_pp_params[be_idx].mute_on;
if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_mute(port_id,
copp_idx,
@@ -15375,7 +15914,7 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__);
clear_bit(ADM_PP_PARAM_LATENCY_BIT,
&pp_config);
- latency = msm_bedais_pp_params[index].latency;
+ latency = msm_bedais_pp_params[be_idx].latency;
if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_latency(port_id,
copp_idx,
@@ -15384,18 +15923,47 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
return 0;
}
+static bool msm_pcm_routing_test_pp_param(int be_idx, long param_bit)
+{
+ return test_bit(param_bit,
+ &msm_bedais_pp_params[be_idx].pp_params_config);
+}
+
+static void msm_routing_set_pp_param(long param_bit, int value)
+{
+ int be_idx;
+
+ if (value) {
+ for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++)
+ set_bit(param_bit,
+ &msm_bedais_pp_params[be_idx].
+ pp_params_config);
+ } else {
+ for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++)
+ clear_bit(param_bit,
+ &msm_bedais_pp_params[be_idx].
+ pp_params_config);
+ }
+}
+
static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int pp_id = ucontrol->value.integer.value[0];
+ int value = ucontrol->value.integer.value[1];
int port_id = 0;
- int index, be_idx, i, topo_id, idx;
+ int be_idx, i, topo_id, idx;
bool mute;
int latency;
bool compr_passthr_mode = true;
pr_debug("%s: pp_id: 0x%x\n", __func__, pp_id);
+ if (pp_id == ADM_PP_PARAM_LIMITER_ID) {
+ msm_routing_set_pp_param(ADM_PP_PARAM_LIMITER_BIT, value);
+ goto done;
+ }
+
for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) {
port_id = msm_bedais[be_idx].port_id;
if (port_id == HDMI_RX || port_id == DISPLAY_PORT_RX)
@@ -15407,16 +15975,6 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- for (index = 0; index < MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX; index++) {
- if (msm_bedais_pp_params[index].port_id == port_id)
- break;
- }
- if (index >= MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX) {
- pr_err("%s: Invalid pp params backend index %d\n",
- __func__, index);
- return -EINVAL;
- }
-
for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
MSM_FRONTEND_DAI_MM_SIZE) {
if ((msm_bedais[be_idx].passthr_mode[i] == LEGACY_PCM) ||
@@ -15439,22 +15997,20 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
switch (pp_id) {
case ADM_PP_PARAM_MUTE_ID:
pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
- mute = ucontrol->value.integer.value[1] ? true : false;
- msm_bedais_pp_params[index].mute_on = mute;
+ mute = value ? true : false;
+ msm_bedais_pp_params[be_idx].mute_on = mute;
set_bit(ADM_PP_PARAM_MUTE_BIT,
- &msm_bedais_pp_params[index].pp_params_config);
+ &msm_bedais_pp_params[be_idx].pp_params_config);
if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_mute(port_id,
idx, mute);
break;
case ADM_PP_PARAM_LATENCY_ID:
pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__);
- msm_bedais_pp_params[index].latency =
- ucontrol->value.integer.value[1];
+ msm_bedais_pp_params[be_idx].latency = value;
set_bit(ADM_PP_PARAM_LATENCY_BIT,
- &msm_bedais_pp_params[index].pp_params_config);
- latency = msm_bedais_pp_params[index].latency =
- ucontrol->value.integer.value[1];
+ &msm_bedais_pp_params[be_idx].pp_params_config);
+ latency = msm_bedais_pp_params[be_idx].latency = value;
if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_latency(port_id,
idx, latency);
@@ -15466,6 +16022,7 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
}
}
}
+done:
return 0;
}
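With the fixed-size MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX lookup removed, the post-processing flags are now kept per back-end DAI and indexed directly by be_idx, and the new ADM_PP_PARAM_LIMITER_ID control simply sets or clears one bit in every backend's pp_params_config via msm_routing_set_pp_param(). The following is a minimal userspace sketch of that pattern, assuming plain shift-based masks in place of the kernel's set_bit()/clear_bit()/test_bit() and an invented backend count.

/* Minimal sketch of the per-backend flag bitmap used above. The
 * backend count and bit values here are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_BACKENDS  4      /* illustrative, not MSM_BACKEND_DAI_MAX */
#define PARAM_MUTE    0x1
#define PARAM_LATENCY 0x2
#define PARAM_LIMITER 0x4

static unsigned long pp_config[NUM_BACKENDS];

/* Apply one parameter bit to every backend, as the limiter path does. */
static void set_param_all(unsigned long bit, bool enable)
{
    for (int be = 0; be < NUM_BACKENDS; be++) {
        if (enable)
            pp_config[be] |= 1UL << bit;
        else
            pp_config[be] &= ~(1UL << bit);
    }
}

static bool test_param(int be, unsigned long bit)
{
    return pp_config[be] & (1UL << bit);
}

int main(void)
{
    set_param_all(PARAM_LIMITER, true);
    printf("backend 2 limiter: %d\n", test_param(2, PARAM_LIMITER));
    set_param_all(PARAM_LIMITER, false);
    printf("backend 2 limiter: %d\n", test_param(2, PARAM_LIMITER));
    return 0;
}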
@@ -15634,6 +16191,93 @@ static struct snd_pcm_ops msm_routing_pcm_ops = {
.prepare = msm_pcm_routing_prepare,
};
+int msm_routing_set_downmix_control_data(int be_id, int session_id,
+ struct asm_stream_pan_ctrl_params *dnmix_param)
+{
+ int i, rc = 0;
+ struct adm_pspd_param_data_t data;
+ struct audproc_chmixer_param_coeff dnmix_cfg;
+ uint16_t variable_payload = 0;
+ char *adm_params = NULL;
+ int port_id, copp_idx = 0;
+ uint32_t params_length = 0;
+
+ if (be_id >= MSM_BACKEND_DAI_MAX) {
+ rc = -EINVAL;
+ return rc;
+ }
+ port_id = msm_bedais[be_id].port_id;
+ copp_idx = adm_get_default_copp_idx(port_id);
+ pr_debug("%s: port_id - %d, copp_idx %d session id - %d\n",
+ __func__, port_id, copp_idx, session_id);
+
+ variable_payload = dnmix_param->num_output_channels * sizeof(uint16_t)+
+ dnmix_param->num_input_channels * sizeof(uint16_t) +
+ dnmix_param->num_output_channels * sizeof(uint16_t) *
+ dnmix_param->num_input_channels * sizeof(uint16_t);
+ i = (variable_payload % sizeof(uint32_t));
+ variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i;
+
+ params_length = variable_payload +
+ sizeof(struct adm_pspd_param_data_t) +
+ sizeof(struct audproc_chmixer_param_coeff);
+ adm_params = kzalloc(params_length, GFP_KERNEL);
+ if (!adm_params) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ data.module_id = AUDPROC_MODULE_ID_CHMIXER;
+ data.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF;
+ data.param_size = sizeof(struct audproc_chmixer_param_coeff) +
+ variable_payload;
+ data.reserved = 0;
+ memcpy((u8 *)adm_params, &data, sizeof(struct adm_pspd_param_data_t));
+
+ dnmix_cfg.index = 0;
+ dnmix_cfg.num_output_channels = dnmix_param->num_output_channels;
+ dnmix_cfg.num_input_channels = dnmix_param->num_input_channels;
+ memcpy(((u8 *)adm_params +
+ sizeof(struct adm_pspd_param_data_t)),
+ &dnmix_cfg, sizeof(struct audproc_chmixer_param_coeff));
+
+ memcpy(((u8 *)adm_params +
+ sizeof(struct adm_pspd_param_data_t) +
+ sizeof(struct audproc_chmixer_param_coeff)),
+ dnmix_param->output_channel_map,
+ dnmix_param->num_output_channels * sizeof(uint16_t));
+ memcpy(((u8 *)adm_params +
+ sizeof(struct adm_pspd_param_data_t) +
+ sizeof(struct audproc_chmixer_param_coeff) +
+ dnmix_param->num_output_channels * sizeof(uint16_t)),
+ dnmix_param->input_channel_map,
+ dnmix_param->num_input_channels * sizeof(uint16_t));
+ memcpy(((u8 *)adm_params +
+ sizeof(struct adm_pspd_param_data_t) +
+ sizeof(struct audproc_chmixer_param_coeff) +
+ (dnmix_param->num_output_channels * sizeof(uint16_t)) +
+ (dnmix_param->num_input_channels * sizeof(uint16_t))),
+ dnmix_param->gain,
+ (dnmix_param->num_output_channels * sizeof(uint16_t)) *
+ (dnmix_param->num_input_channels * sizeof(uint16_t)));
+
+ if (params_length) {
+ rc = adm_set_pspd_matrix_params(port_id,
+ copp_idx,
+ session_id,
+ adm_params,
+ params_length);
+ if (rc) {
+ pr_err("%s: send params failed rc=%d\n", __func__, rc);
+ rc = -EINVAL;
+ }
+ }
+end:
+ kfree(adm_params);
+ return rc;
+}
+
+
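msm_routing_set_downmix_control_data() above packs one contiguous ADM payload: the PSPD param-data header, the channel-mixer coefficient header, then three variable-length arrays (output channel map, input channel map, gain matrix), with the variable part rounded up to a 32-bit boundary before allocation. Below is a minimal sketch of that pack-and-pad pattern; the element counts are invented and the real adm_pspd_param_data_t/audproc_chmixer_param_coeff headers are omitted.

/* Sketch of the "fixed headers + variable arrays, padded to 4 bytes"
 * packing used above. Sizes are illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t round_up_u32(size_t len)
{
    size_t rem = len % sizeof(uint32_t);

    return rem ? len + (sizeof(uint32_t) - rem) : len;
}

int main(void)
{
    uint16_t out_map[] = { 1, 2 };       /* output channel map      */
    uint16_t in_map[]  = { 1, 2, 3 };    /* input channel map       */
    uint16_t gain[2 * 3] = { 0 };        /* out x in gain matrix    */
    size_t var = round_up_u32(sizeof(out_map) + sizeof(in_map) +
                              sizeof(gain));
    uint8_t *buf = calloc(1, var);
    uint8_t *cur = buf;

    if (!buf)
        return 1;
    memcpy(cur, out_map, sizeof(out_map)); cur += sizeof(out_map);
    memcpy(cur, in_map, sizeof(in_map));   cur += sizeof(in_map);
    memcpy(cur, gain, sizeof(gain));
    printf("variable payload: %zu bytes after padding\n", var); /* 22 -> 24 */
    free(buf);
    return 0;
}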
/* Not used but frame seems to require it */
static int msm_routing_probe(struct snd_soc_platform *platform)
{
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 19e726001d25..c640be343e10 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -193,6 +193,13 @@ enum {
MSM_FRONTEND_DAI_MULTIMEDIA18,
MSM_FRONTEND_DAI_MULTIMEDIA19,
MSM_FRONTEND_DAI_MULTIMEDIA20,
+ MSM_FRONTEND_DAI_MULTIMEDIA21,
+ MSM_FRONTEND_DAI_MULTIMEDIA22,
+ MSM_FRONTEND_DAI_MULTIMEDIA23,
+ MSM_FRONTEND_DAI_MULTIMEDIA24,
+ MSM_FRONTEND_DAI_MULTIMEDIA25,
+ MSM_FRONTEND_DAI_MULTIMEDIA26,
+ MSM_FRONTEND_DAI_MULTIMEDIA27,
MSM_FRONTEND_DAI_CS_VOICE,
MSM_FRONTEND_DAI_VOIP,
MSM_FRONTEND_DAI_AFE_RX,
@@ -218,8 +225,8 @@ enum {
MSM_FRONTEND_DAI_MAX,
};
-#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA20 + 1)
-#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA20
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA27 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA27
enum {
MSM_BACKEND_DAI_PRI_I2S_RX = 0,
@@ -392,12 +399,20 @@ enum {
#define RELEASE_LOCK 0
#define ACQUIRE_LOCK 1
-#define MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX 2
#define HDMI_RX_ID 0x8001
-#define ADM_PP_PARAM_MUTE_ID 0
-#define ADM_PP_PARAM_MUTE_BIT 1
-#define ADM_PP_PARAM_LATENCY_ID 1
-#define ADM_PP_PARAM_LATENCY_BIT 2
+
+enum {
+ ADM_PP_PARAM_MUTE_ID,
+ ADM_PP_PARAM_LATENCY_ID,
+ ADM_PP_PARAM_LIMITER_ID
+};
+
+enum {
+ ADM_PP_PARAM_MUTE_BIT = 0x1,
+ ADM_PP_PARAM_LATENCY_BIT = 0x2,
+ ADM_PP_PARAM_LIMITER_BIT = 0x4
+};
+
#define BE_DAI_PORT_SESSIONS_IDX_MAX 4
#define BE_DAI_FE_SESSIONS_IDX_MAX 2
@@ -483,4 +498,6 @@ int msm_pcm_routing_reg_stream_app_type_cfg(
int msm_pcm_routing_get_stream_app_type_cfg(
int fedai_id, int session_type, int *be_id,
struct msm_pcm_stream_app_type_cfg *cfg_data);
+int msm_routing_set_downmix_control_data(int be_id, int session_id,
+ struct asm_stream_pan_ctrl_params *pan_param);
#endif /*_MSM_PCM_H*/
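Appending MULTIMEDIA21-27 to the front-end DAI enum works because MSM_FRONTEND_DAI_MM_SIZE and MSM_FRONTEND_DAI_MM_MAX_ID are derived from the last multimedia entry, so every table sized with them grows with the enum. A small illustration of that idiom, using placeholder names rather than the driver's identifiers:

/* Illustrative enum-derived sizing, mirroring the header change above. */
#include <stdio.h>

enum fe_dai {
    FE_DAI_MM1,
    FE_DAI_MM2,
    /* ... */
    FE_DAI_MM27,
    FE_DAI_VOICE,    /* non-multimedia entries follow */
    FE_DAI_MAX,
};

#define FE_DAI_MM_MAX_ID FE_DAI_MM27
#define FE_DAI_MM_SIZE   (FE_DAI_MM_MAX_ID + 1)

/* Any per-multimedia-stream table sized with FE_DAI_MM_SIZE picks up
 * newly appended MM entries without further edits. */
static int mm_session[FE_DAI_MM_SIZE];

int main(void)
{
    mm_session[FE_DAI_MM27] = 1;
    printf("MM table size: %d\n", FE_DAI_MM_SIZE);
    return 0;
}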
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index cac28f43e5ae..65f5167d9dee 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -298,11 +298,11 @@ int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
*update_params_value16++ = op_FR_ip_FR_weight;
avail_length = avail_length - (4 * sizeof(uint16_t));
if (params_length) {
- rc = adm_set_stereo_to_custom_stereo(port_id,
- copp_idx,
- session_id,
- params_value,
- params_length);
+ rc = adm_set_pspd_matrix_params(port_id,
+ copp_idx,
+ session_id,
+ params_value,
+ params_length);
if (rc) {
pr_err("%s: send params failed rc=%d\n", __func__, rc);
kfree(params_value);
diff --git a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
index 6bb85ca8e84e..5c5f7bc482c8 100644
--- a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
@@ -100,7 +100,7 @@ static void loopback_event_handler(uint32_t opcode,
return;
}
- cstream = trans->source.cstream;
+ cstream = trans->sink.cstream;
ac = trans->audio_client;
/*
@@ -223,6 +223,7 @@ static int msm_transcode_loopback_open(struct snd_compr_stream *cstream)
ret = -EINVAL;
goto exit;
}
+ msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
}
pr_debug("%s: num stream%d, stream name %s\n", __func__,
@@ -237,8 +238,7 @@ static int msm_transcode_loopback_open(struct snd_compr_stream *cstream)
}
runtime->private_data = trans;
- if (trans->num_streams == 1)
- msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
+
exit:
mutex_unlock(&trans->lock);
return ret;
@@ -283,14 +283,14 @@ static int msm_transcode_loopback_free(struct snd_compr_stream *cstream)
trans->num_streams--;
stop_transcoding(trans);
- if (cstream->direction == SND_COMPRESS_PLAYBACK)
+ if (cstream->direction == SND_COMPRESS_PLAYBACK) {
memset(&trans->sink, 0, sizeof(struct loopback_stream));
- else if (cstream->direction == SND_COMPRESS_CAPTURE)
+ msm_adsp_clean_mixer_ctl_pp_event_queue(rtd);
+ } else if (cstream->direction == SND_COMPRESS_CAPTURE) {
memset(&trans->source, 0, sizeof(struct loopback_stream));
+ }
trans->session_state = LOOPBACK_SESSION_CLOSE;
- if (trans->num_streams == 1)
- msm_adsp_clean_mixer_ctl_pp_event_queue(rtd);
mutex_unlock(&trans->lock);
return ret;
}
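In the transcode loopback driver the mixer-control post-processing event queue now follows the playback (sink) stream's lifetime: it is initialised during compress open and cleaned when the playback stream is freed, instead of being keyed off the total stream count, and the event handler reads trans->sink.cstream to match. A small ownership sketch under that assumption; the types and helpers are placeholders, not the driver's:

/* Sketch of tying a shared resource to the playback stream's lifetime,
 * as the loopback change above does for the pp event queue. */
#include <stdbool.h>
#include <stdio.h>

enum dir { PLAYBACK, CAPTURE };

struct loopback {
    bool queue_ready;
};

static void stream_open(struct loopback *lb, enum dir d)
{
    if (d == PLAYBACK) {        /* set up with the sink stream */
        lb->queue_ready = true;
        puts("event queue initialised");
    }
}

static void stream_free(struct loopback *lb, enum dir d)
{
    if (d == PLAYBACK) {        /* torn down with the sink stream */
        lb->queue_ready = false;
        puts("event queue cleaned");
    }
}

int main(void)
{
    struct loopback lb = { false };

    stream_open(&lb, CAPTURE);  /* no effect */
    stream_open(&lb, PLAYBACK);
    stream_free(&lb, PLAYBACK);
    return 0;
}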
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 28aaf2172221..e1bfc950d0e3 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -48,12 +48,6 @@
#define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF
#endif
-/* ENUM for adm_status */
-enum adm_cal_status {
- ADM_STATUS_CALIBRATION_REQUIRED = 0,
- ADM_STATUS_MAX,
-};
-
struct adm_copp {
atomic_t id[AFE_MAX_PORTS][MAX_COPPS_PER_PORT];
@@ -781,9 +775,9 @@ fail_cmd:
return ret;
}
-int adm_set_stereo_to_custom_stereo(int port_id, int copp_idx,
- unsigned int session_id, char *params,
- uint32_t params_length)
+int adm_set_pspd_matrix_params(int port_id, int copp_idx,
+ unsigned int session_id, char *params,
+ uint32_t params_length)
{
struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
int sz, rc = 0, port_idx;
@@ -1413,6 +1407,7 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
case ADM_CMD_DEVICE_OPEN_V5:
case ADM_CMD_DEVICE_CLOSE_V5:
case ADM_CMD_DEVICE_OPEN_V6:
+ case ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1:
pr_debug("%s: Basic callback received, wake up.\n",
__func__);
atomic_set(&this_adm.copp.stat[port_idx]
@@ -2695,6 +2690,97 @@ fail_cmd:
return;
}
+
+static int adm_set_mtmx_params_v1(int port_idx, int copp_idx,
+ int params_length, void *params)
+{
+ struct adm_cmd_set_mtmx_params_v1 *adm_params = NULL;
+ int rc = 0;
+ int sz;
+
+ sz = sizeof(*adm_params) + params_length;
+ adm_params = kzalloc(sz, GFP_KERNEL);
+ if (!adm_params)
+ return -ENOMEM;
+
+ memcpy(((u8 *)adm_params + sizeof(*adm_params)),
+ params, params_length);
+ adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ adm_params->hdr.pkt_size = sz;
+ adm_params->hdr.src_svc = APR_SVC_ADM;
+ adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+ adm_params->hdr.src_port = 0;
+ adm_params->hdr.dest_svc = APR_SVC_ADM;
+ adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+ adm_params->hdr.dest_port =
+ atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+ adm_params->hdr.token = port_idx << 16 | copp_idx;
+ adm_params->hdr.opcode = ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1;
+ adm_params->payload_addr_lsw = 0;
+ adm_params->payload_addr_msw = 0;
+ adm_params->mem_map_handle = 0;
+ adm_params->payload_size = params_length;
+ adm_params->copp_id = atomic_read(&this_adm.copp.
+ id[port_idx][copp_idx]);
+
+ atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+ rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+ if (rc < 0) {
+ pr_err("%s: Set params failed port_idx = 0x%x rc %d\n",
+ __func__, port_idx, rc);
+ rc = -EINVAL;
+ goto send_param_return;
+ }
+ /* Wait for the callback */
+ rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+ atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!rc) {
+ pr_err("%s: Set params timed out port_idx = 0x%x\n",
+ __func__, port_idx);
+ rc = -EINVAL;
+ goto send_param_return;
+ } else if (atomic_read(&this_adm.copp.stat
+ [port_idx][copp_idx]) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&this_adm.copp.stat
+ [port_idx][copp_idx])));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&this_adm.copp.stat
+ [port_idx][copp_idx]));
+ goto send_param_return;
+ }
+ rc = 0;
+send_param_return:
+ kfree(adm_params);
+ return rc;
+}
+
+static void adm_enable_mtmx_limiter(int port_idx, int copp_idx)
+{
+ int rc;
+ struct enable_param_v6 adm_param = { {0} };
+
+ adm_param.param.module_id = ADM_MTMX_MODULE_STREAM_LIMITER;
+ adm_param.param.param_id = AUDPROC_PARAM_ID_ENABLE;
+ adm_param.param.param_size = sizeof(adm_param.enable);
+ adm_param.enable = 1;
+
+ rc = adm_set_mtmx_params_v1(port_idx, copp_idx,
+ sizeof(adm_param), &adm_param);
+ if (rc < 0) {
+ pr_err("%s: adm_set_mtmx_params_v1 failed port_idx = 0x%x rc %d\n",
+ __func__, port_idx, rc);
+ goto done;
+ }
+ set_bit(ADM_STATUS_LIMITER,
+ (void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
+done:
+ return;
+}
+
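adm_set_mtmx_params_v1() above follows the standard ADM command flow: the parameter payload is appended after the command header, the APR token packs the port and COPP indices into one 32-bit value (port_idx << 16 | copp_idx), and the caller blocks on the per-COPP status that the basic callback (now also covering ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1) updates. A tiny sketch of that token layout, with invented index values:

/* Sketch of the APR token layout used above: port index in the upper
 * 16 bits, COPP index in the lower 16. The values are made up. */
#include <stdint.h>
#include <stdio.h>

static uint32_t make_token(uint16_t port_idx, uint16_t copp_idx)
{
    return ((uint32_t)port_idx << 16) | copp_idx;
}

int main(void)
{
    uint32_t token = make_token(5, 3);

    printf("token=0x%08x port=%u copp=%u\n", (unsigned)token,
           (unsigned)(token >> 16), (unsigned)(token & 0xffff));
    return 0;
}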
static void route_set_opcode_matrix_id(
struct adm_cmd_matrix_map_routings_v5 **route_addr,
int path, uint32_t passthr_mode)
@@ -2791,6 +2877,11 @@ int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode,
copp_idx = payload_map.copp_idx[i];
copps_list[i] = atomic_read(&this_adm.copp.id[port_idx]
[copp_idx]);
+ if (test_bit(ADM_STATUS_LIMITER,
+ (void *)&payload_map.route_status) &&
+ ((path == ADM_PATH_PLAYBACK) ||
+ (path == ADM_PATH_COMPRESSED_RX)))
+ adm_enable_mtmx_limiter(port_idx, copp_idx);
}
atomic_set(&this_adm.matrix_map_stat, -1);
@@ -2991,6 +3082,8 @@ int adm_close(int port_id, int perf_mode, int copp_idx)
clear_bit(ADM_STATUS_CALIBRATION_REQUIRED,
(void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
+ clear_bit(ADM_STATUS_LIMITER,
+ (void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
if (ret < 0) {
@@ -4807,8 +4900,7 @@ static int __init adm_init(void)
&this_adm.copp.adm_delay_wait[i][j]);
atomic_set(&this_adm.copp.topology[i][j], 0);
this_adm.copp.adm_delay[i][j] = 0;
- this_adm.copp.adm_status[i][j] =
- ADM_STATUS_CALIBRATION_REQUIRED;
+ this_adm.copp.adm_status[i][j] = 0;
}
}
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 44e5b01b1ccb..a44569530846 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -44,7 +44,7 @@
#define TRUE 0x01
#define FALSE 0x00
#define SESSION_MAX 8
-
+#define ASM_MAX_CHANNELS 8
enum {
ASM_TOPOLOGY_CAL = 0,
ASM_CUSTOM_TOP_CAL,
@@ -423,14 +423,22 @@ static void config_debug_fs_run(void)
}
}
-static void config_debug_fs_write(struct audio_buffer *ab)
+static void config_debug_fs_write(struct audio_buffer *ab, int offset)
{
if (out_enable_flag) {
char zero_pattern[2] = {0x00, 0x00};
+ char *data;
+
+ if ((offset < 0) || (offset > ab->size)) {
+ pr_err("Invalid offset %d", offset);
+ return;
+ }
+
+ data = (char *)ab->data + offset;
/* If First two byte is non zero and last two byte
is zero then it is warm output pattern */
- if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
- (!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
+ if ((strncmp(data, zero_pattern, 2)) &&
+ (!strncmp((data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_warm_tv);
pr_debug("%s: WARM:apr_send_pkt at %ld sec %ld microsec\n",
__func__,
@@ -440,8 +448,8 @@ static void config_debug_fs_write(struct audio_buffer *ab)
}
/* If First two byte is zero and last two byte is
non zero then it is cont ouput pattern */
- else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
- && (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
+ else if ((!strncmp(data, zero_pattern, 2))
+ && (strncmp((data + 2), zero_pattern, 2))) {
do_gettimeofday(&out_cont_tv);
pr_debug("%s: CONT:apr_send_pkt at %ld sec %ld microsec\n",
__func__,
@@ -488,7 +496,7 @@ outbuf_fail:
return;
}
#else
-static void config_debug_fs_write(struct audio_buffer *ab)
+static void config_debug_fs_write(struct audio_buffer *ab, int offset)
{
return;
}
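config_debug_fs_write() now takes a byte offset into the audio buffer rather than always inspecting the first bytes, and rejects offsets outside the buffer; the async-write caller later in this diff derives the offset from physical addresses as lbuf_phys_addr - ab->phys. A minimal sketch of that bounds-checked lookup, with a simplified stand-in for struct audio_buffer:

/* Sketch of the offset-validated buffer access added above; the
 * audio_buffer stand-in and the sample values are illustrative. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct buf {
    uint8_t *data;   /* virtual address of the buffer        */
    uint64_t phys;   /* physical/base address of the buffer  */
    size_t size;
};

static const uint8_t *buf_at(const struct buf *b, uint64_t target_phys)
{
    int64_t offset = (int64_t)(target_phys - b->phys);

    if (offset < 0 || (size_t)offset > b->size) {
        fprintf(stderr, "invalid offset %lld\n", (long long)offset);
        return NULL;
    }
    return b->data + offset;
}

int main(void)
{
    uint8_t backing[4096] = { 0 };
    struct buf b = { backing, 0x80000000ULL, sizeof(backing) };

    /* Same derivation as the caller: offset = lbuf_phys_addr - phys. */
    printf("%p\n", (const void *)buf_at(&b, 0x80000000ULL + 256));
    printf("%p\n", (const void *)buf_at(&b, 0x80000000ULL + 8192));
    return 0;
}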
@@ -7419,6 +7427,296 @@ int q6asm_set_softvolume_v2(struct audio_client *ac,
return __q6asm_set_softvolume(ac, softvol_param, instance);
}
+int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac,
+ struct asm_stream_pan_ctrl_params *pan_param)
+{
+ int sz = 0;
+ int rc = 0;
+ int i = 0;
+ int32_t ch = 0;
+ struct apr_hdr hdr;
+ struct audproc_volume_ctrl_channel_type_gain_pair
+ gain_data[ASM_MAX_CHANNELS];
+ struct asm_stream_cmd_set_pp_params_v2 payload_params;
+ struct asm_stream_param_data_v2 data;
+ uint16_t *asm_params = NULL;
+
+ if (ac == NULL) {
+ pr_err("%s: ac is NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail;
+ }
+ if (ac->apr == NULL) {
+ dev_err(ac->dev, "%s: ac apr handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ sz = sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(uint32_t) +
+ (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
+ ASM_MAX_CHANNELS);
+ asm_params = kzalloc(sz, GFP_KERNEL);
+ if (!asm_params) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state_pp, -1);
+
+ hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+ memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
+
+ payload_params.data_payload_addr_lsw = 0;
+ payload_params.data_payload_addr_msw = 0;
+ payload_params.mem_map_handle = 0;
+ payload_params.data_payload_size =
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(uint32_t) +
+ (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
+ ASM_MAX_CHANNELS);
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)),
+ &payload_params,
+ sizeof(struct asm_stream_cmd_set_pp_params_v2));
+
+ data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+ data.param_id = AUDPROC_PARAM_ID_MULTICHANNEL_GAIN;
+ data.param_size = sizeof(uint32_t) +
+ (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
+ ASM_MAX_CHANNELS);
+ data.reserved = 0;
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2)),
+ &data, sizeof(struct asm_stream_param_data_v2));
+
+ ch = pan_param->num_output_channels;
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2)),
+ &ch,
+ sizeof(uint32_t));
+
+ memset(gain_data, 0,
+ ASM_MAX_CHANNELS *
+ sizeof(struct audproc_volume_ctrl_channel_type_gain_pair));
+ for (i = 0; i < pan_param->num_output_channels; i++) {
+ gain_data[i].channel_type =
+ pan_param->output_channel_map[i];
+ gain_data[i].gain = pan_param->gain[i];
+ }
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(uint32_t)),
+ gain_data,
+ ASM_MAX_CHANNELS *
+ sizeof(struct audproc_volume_ctrl_channel_type_gain_pair));
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+ if (rc < 0) {
+ pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+ __func__, data.param_id, rc);
+ goto done;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ);
+ if (!rc) {
+ pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+ data.param_id);
+ rc = -EINVAL;
+ goto done;
+ }
+ if (atomic_read(&ac->cmd_state_pp) > 0) {
+ pr_err("%s: DSP returned error[%d], set-params paramid[0x%x]\n",
+ __func__, atomic_read(&ac->cmd_state_pp),
+ data.param_id);
+ rc = -EINVAL;
+ goto done;
+ }
+ rc = 0;
+done:
+ kfree(asm_params);
+fail:
+ return rc;
+}
+
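q6asm_set_vol_ctrl_gain_pair() above sends AUDPROC_PARAM_ID_MULTICHANNEL_GAIN as a 32-bit channel count followed by a fixed-capacity array of (channel_type, gain) pairs that is zero-filled and then populated for the active output channels. The sketch below mirrors only that payload shape; the struct names, channel types and gain values are illustrative, not the driver's definitions.

/* Sketch of the multichannel-gain payload shape: a count plus a
 * zero-filled, fixed-capacity array of (channel_type, gain) pairs. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CHANNELS 8

struct chan_gain_pair {
    uint16_t channel_type;
    uint16_t gain;
};

struct multich_gain_param {
    uint32_t num_channels;
    struct chan_gain_pair pair[MAX_CHANNELS];
};

int main(void)
{
    uint16_t chan_map[] = { 1, 2 };          /* illustrative channel types */
    uint16_t gains[]    = { 0x2000, 0x2000 };/* illustrative gain values   */
    struct multich_gain_param p;

    memset(&p, 0, sizeof(p));                /* unused pairs stay zeroed   */
    p.num_channels = 2;
    for (uint32_t i = 0; i < p.num_channels; i++) {
        p.pair[i].channel_type = chan_map[i];
        p.pair[i].gain = gains[i];
    }
    printf("payload size: %zu bytes\n", sizeof(p));
    return 0;
}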
+int q6asm_set_mfc_panning_params(struct audio_client *ac,
+ struct asm_stream_pan_ctrl_params *pan_param)
+{
+ int sz, rc, i;
+ struct audproc_mfc_output_media_fmt mfc_cfg;
+ struct apr_hdr hdr;
+ struct asm_stream_cmd_set_pp_params_v2 payload_params;
+ struct asm_stream_param_data_v2 data;
+ struct audproc_chmixer_param_coeff pan_cfg;
+ uint16_t variable_payload = 0;
+ uint16_t *asm_params = NULL;
+
+ sz = rc = i = 0;
+ if (ac == NULL) {
+ pr_err("%s: ac handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd1;
+ }
+ if (ac->apr == NULL) {
+ pr_err("%s: ac apr handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd1;
+ }
+
+ sz = sizeof(struct audproc_mfc_output_media_fmt);
+ q6asm_add_hdr_async(ac, &mfc_cfg.params.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state_pp, -1);
+ mfc_cfg.params.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+ mfc_cfg.params.payload_addr_lsw = 0;
+ mfc_cfg.params.payload_addr_msw = 0;
+ mfc_cfg.params.mem_map_handle = 0;
+ mfc_cfg.params.payload_size = sizeof(mfc_cfg) - sizeof(mfc_cfg.params);
+ mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
+ mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+ mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
+ sizeof(mfc_cfg.data);
+ mfc_cfg.data.reserved = 0;
+ mfc_cfg.sampling_rate = 0;
+ mfc_cfg.bits_per_sample = 0;
+ mfc_cfg.num_channels = pan_param->num_output_channels;
+ for (i = 0; i < mfc_cfg.num_channels; i++)
+ mfc_cfg.channel_type[i] = pan_param->output_channel_map[i];
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &mfc_cfg);
+ if (rc < 0) {
+ pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+ __func__, mfc_cfg.data.param_id, rc);
+ rc = -EINVAL;
+ goto fail_cmd1;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+ mfc_cfg.data.param_id);
+ rc = -ETIMEDOUT;
+ goto fail_cmd1;
+ }
+ if (atomic_read(&ac->cmd_state_pp) > 0) {
+ pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state_pp)),
+ mfc_cfg.data.param_id);
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state_pp));
+ goto fail_cmd1;
+ }
+
+ variable_payload = pan_param->num_output_channels * sizeof(uint16_t)+
+ pan_param->num_input_channels * sizeof(uint16_t) +
+ pan_param->num_output_channels * sizeof(uint16_t) *
+ pan_param->num_input_channels * sizeof(uint16_t);
+ i = (variable_payload % sizeof(uint32_t));
+ variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i;
+ sz = sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct audproc_chmixer_param_coeff) +
+ variable_payload;
+
+ asm_params = kzalloc(sz, GFP_KERNEL);
+ if (!asm_params) {
+ rc = -ENOMEM;
+ goto fail_cmd1;
+ }
+
+ q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state_pp, -1);
+ hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+ memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
+
+ payload_params.data_payload_addr_lsw = 0;
+ payload_params.data_payload_addr_msw = 0;
+ payload_params.mem_map_handle = 0;
+ payload_params.data_payload_size =
+ sizeof(struct audproc_chmixer_param_coeff) +
+ variable_payload + sizeof(struct asm_stream_param_data_v2);
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)),
+ &payload_params,
+ sizeof(struct asm_stream_cmd_set_pp_params_v2));
+
+ data.module_id = AUDPROC_MODULE_ID_MFC;
+ data.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF;
+ data.param_size = sizeof(struct audproc_chmixer_param_coeff) +
+ variable_payload;
+ data.reserved = 0;
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2)),
+ &data, sizeof(struct asm_stream_param_data_v2));
+
+ pan_cfg.index = 0;
+ pan_cfg.num_output_channels = pan_param->num_output_channels;
+ pan_cfg.num_input_channels = pan_param->num_input_channels;
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2)),
+ &pan_cfg, sizeof(struct audproc_chmixer_param_coeff));
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct audproc_chmixer_param_coeff)),
+ pan_param->output_channel_map,
+ pan_param->num_output_channels * sizeof(uint16_t));
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct audproc_chmixer_param_coeff) +
+ pan_param->num_output_channels * sizeof(uint16_t)),
+ pan_param->input_channel_map,
+ pan_param->num_input_channels * sizeof(uint16_t));
+ memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
+ sizeof(struct asm_stream_cmd_set_pp_params_v2) +
+ sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct audproc_chmixer_param_coeff) +
+ (pan_param->num_output_channels * sizeof(uint16_t)) +
+ (pan_param->num_input_channels * sizeof(uint16_t))),
+ pan_param->gain,
+ (pan_param->num_output_channels * sizeof(uint16_t)) *
+ (pan_param->num_input_channels * sizeof(uint16_t)));
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+ if (rc < 0) {
+ pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+ __func__, data.param_id, rc);
+ rc = -EINVAL;
+ goto fail_cmd2;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
+ data.param_id);
+ rc = -ETIMEDOUT;
+ goto fail_cmd2;
+ }
+ if (atomic_read(&ac->cmd_state_pp) > 0) {
+ pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state_pp)),
+ data.param_id);
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state_pp));
+ goto fail_cmd2;
+ }
+ rc = 0;
+fail_cmd2:
+ kfree(asm_params);
+fail_cmd1:
+ return rc;
+}
+
int q6asm_equalizer(struct audio_client *ac, void *eq_p)
{
struct asm_eq_params eq;
@@ -7689,6 +7987,7 @@ int q6asm_async_write(struct audio_client *ac,
u32 liomode;
u32 io_compressed;
u32 io_compressed_stream;
+ int offset = 0;
if (ac == NULL) {
pr_err("%s: APR handle NULL\n", __func__);
@@ -7750,7 +8049,10 @@ int q6asm_async_write(struct audio_client *ac,
}
}
- config_debug_fs_write(ab);
+ if (ab != NULL) {
+ offset = lbuf_phys_addr - ab->phys;
+ config_debug_fs_write(ab, offset);
+ }
rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
if (rc < 0) {
@@ -7897,7 +8199,7 @@ int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
write.mem_map_handle);
mutex_unlock(&port->lock);
- config_debug_fs_write(ab);
+ config_debug_fs_write(ab, 0);
rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
if (rc < 0) {
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 05012bb178d7..fdd87c7e3e91 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1460,16 +1460,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
Dwarf_Addr _addr = 0, baseaddr = 0;
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
- bool reloc = false;
-retry:
+ /* We always need to relocate the address for aranges */
+ if (debuginfo__get_text_offset(dbg, &baseaddr) == 0)
+ addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
- if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
- addr += baseaddr;
- reloc = true;
- goto retry;
- }
pr_warning("Failed to find debug information for address %lx\n",
addr);
ret = -EINVAL;
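The perf probe-finder change drops the retry loop and unconditionally adds the debuginfo text offset to the address before dwarf_addrdie(), since, as the new comment notes, the address always needs relocating for the aranges lookup. A tiny sketch of that relocate-then-look-up flow; the lookup is stubbed and does not use the real libdw API:

/* Sketch of the relocate-then-lookup flow adopted above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for debuginfo__get_text_offset(): returns 0 on success. */
static int get_text_offset(uint64_t *base)
{
    *base = 0x400000;   /* illustrative text offset */
    return 0;
}

static bool lookup_cu(uint64_t addr)
{
    /* Placeholder for dwarf_addrdie(dbg, addr, &cudie). */
    return addr >= 0x400000;
}

int main(void)
{
    uint64_t addr = 0x1234, base = 0;

    /* Always relocate before the lookup instead of retrying on failure. */
    if (get_text_offset(&base) == 0)
        addr += base;
    printf("lookup %s at 0x%llx\n",
           lookup_cu(addr) ? "hit" : "miss", (unsigned long long)addr);
    return 0;
}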
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 77147b42d598..f1c055f3c243 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -79,12 +79,12 @@ static void add_list(char *buf, int len)
}
}
-#define BUF_SIZE 1024
+#define BUF_SIZE (128 * 1024)
int main(int argc, char **argv)
{
FILE *fin, *fout;
- char buf[BUF_SIZE];
+ char *buf;
int ret, i, count;
struct block_list *list2;
struct stat st;
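page_owner_sort now reads records through a 128 KB buffer, which is too large to keep on main()'s stack, so the next hunk turns buf into a heap allocation that is checked together with the record list. A minimal sketch of that stack-to-heap move:

/* Sketch of moving a large I/O buffer off the stack, matching the
 * page_owner_sort change; BUF_SIZE mirrors the new value. */
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE (128 * 1024)

int main(void)
{
    char *buf = malloc(BUF_SIZE);   /* was: char buf[BUF_SIZE]; */

    if (!buf) {
        printf("Out of memory\n");
        exit(1);
    }
    /* ... the read_block(buf, BUF_SIZE, fin) loop would go here ... */
    free(buf);
    return 0;
}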
@@ -107,6 +107,11 @@ int main(int argc, char **argv)
max_size = st.st_size / 100; /* hack ... */
list = malloc(max_size * sizeof(*list));
+ buf = malloc(BUF_SIZE);
+ if (!list || !buf) {
+ printf("Out of memory\n");
+ exit(1);
+ }
for ( ; ; ) {
ret = read_block(buf, BUF_SIZE, fin);