summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs43
-rw-r--r--Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt31
-rw-r--r--Documentation/devicetree/bindings/arm/msm/adv7481.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/msm/mdm-modem.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/msm/tv-tuner.txt15
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,virtclk-front.txt14
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt46
-rw-r--r--Documentation/devicetree/bindings/display/msm/sde-hyp.txt13
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-ba.txt2
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/qcom-audio-dev.txt2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/tee.txt118
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile6
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-375.dtsi4
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi4
-rw-r--r--arch/arm/boot/dts/armada-39x.dtsi4
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi9
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi6
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts2
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-auto-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096pro-auto-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-cdp.dts2
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-lgd-incell-sw49106-fhd-video.dtsi115
-rw-r--r--arch/arm/boot/dts/qcom/msm-audio.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi45
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi30
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-camera-sensor-auto-cdp.dtsi171
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi12
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mtp.dtsi90
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-regulator-camera-auto-cdp.dtsi24
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-v3-auto-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996.dtsi15
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-auto-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-v1.1-auto-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-cdp.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mtp.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi16
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi.dts16
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts31
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi41
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi3
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/configs/sdm660-perf_defconfig1
-rw-r--r--arch/arm/crypto/aesbs-glue.c6
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/elf.h8
-rw-r--r--arch/arm/include/asm/unaligned.h27
-rw-r--r--arch/arm/kernel/traps.c28
-rw-r--r--arch/arm/mach-omap1/dma.c16
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c1
-rw-r--r--arch/arm/mach-pxa/balloon3.c1
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270-income.c1
-rw-r--r--arch/arm/mach-pxa/corgi.c1
-rw-r--r--arch/arm/mach-pxa/trizeps4.c1
-rw-r--r--arch/arm/mach-pxa/vpac270.c1
-rw-r--r--arch/arm/mach-pxa/zeus.c1
-rw-r--r--arch/arm/mach-pxa/zylonite.c1
-rw-r--r--arch/arm/mm/dump.c4
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi2
-rw-r--r--arch/arm64/configs/fsmcortex-perf_defconfig108
-rw-r--r--arch/arm64/configs/fsmcortex_defconfig113
-rw-r--r--arch/arm64/configs/msm-auto-gvm-perf_defconfig3
-rw-r--r--arch/arm64/configs/msm-auto-gvm_defconfig3
-rw-r--r--arch/arm64/configs/msm-auto-perf_defconfig7
-rw-r--r--arch/arm64/configs/msm-auto_defconfig7
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig1
-rw-r--r--arch/arm64/configs/msmcortex_defconfig1
-rw-r--r--arch/arm64/configs/sdm660-perf_defconfig1
-rw-r--r--arch/arm64/include/asm/efi.h4
-rw-r--r--arch/arm64/include/asm/elf.h2
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm64/include/asm/memory.h1
-rw-r--r--arch/arm64/include/asm/mmu_context.h52
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/kernel/acpi_parking_protocol.c3
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/arm64/kernel/insn.c2
-rw-r--r--arch/arm64/kernel/io.c12
-rw-r--r--arch/arm64/kernel/psci.c4
-rw-r--r--arch/arm64/kernel/setup.c13
-rw-r--r--arch/arm64/kernel/smp_spin_table.c3
-rw-r--r--arch/arm64/kernel/traps.c2
-rw-r--r--arch/arm64/kernel/vdso.c12
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S2
-rw-r--r--arch/arm64/mm/init.c9
-rw-r--r--arch/arm64/mm/kasan_init.c25
-rw-r--r--arch/arm64/mm/mmu.c35
-rw-r--r--arch/mips/ar7/platform.c5
-rw-r--r--arch/mips/ar7/prom.c2
-rw-r--r--arch/mips/bcm47xx/leds.c2
-rw-r--r--arch/mips/include/asm/asm.h10
-rw-r--r--arch/mips/include/asm/mips-cm.h4
-rw-r--r--arch/mips/kernel/process.c4
-rw-r--r--arch/mips/kernel/ptrace.c17
-rw-r--r--arch/mips/kernel/setup.c78
-rw-r--r--arch/mips/kernel/smp.c29
-rw-r--r--arch/mips/mm/uasm-micromips.c2
-rw-r--r--arch/mips/netlogic/common/irq.c4
-rw-r--r--arch/mips/ralink/mt7620.c4
-rw-r--r--arch/parisc/kernel/syscall.S6
-rw-r--r--arch/powerpc/Kconfig5
-rw-r--r--arch/powerpc/boot/dts/fsl/kmcoge4.dts4
-rw-r--r--arch/powerpc/kernel/signal.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c5
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/include/asm/asm-prototypes.h8
-rw-r--r--arch/s390/include/asm/pci_insn.h2
-rw-r--r--arch/s390/include/asm/runtime_instr.h4
-rw-r--r--arch/s390/include/asm/switch_to.h2
-rw-r--r--arch/s390/kernel/dis.c5
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/process.c3
-rw-r--r--arch/s390/kernel/runtime_instr.c30
-rw-r--r--arch/s390/pci/pci.c5
-rw-r--r--arch/s390/pci/pci_insn.c6
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh770x.c1
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S12
-rw-r--r--arch/x86/include/asm/efi.h26
-rw-r--r--arch/x86/include/asm/kvm_emulate.h1
-rw-r--r--arch/x86/include/asm/syscalls.h2
-rw-r--r--arch/x86/include/asm/uaccess.h14
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c18
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c23
-rw-r--r--arch/x86/kernel/ldt.c16
-rw-r--r--arch/x86/kvm/emulate.c1
-rw-r--r--arch/x86/kvm/svm.c9
-rw-r--r--arch/x86/kvm/vmx.c4
-rw-r--r--arch/x86/kvm/x86.c57
-rw-r--r--arch/x86/lib/x86-opcode-map.txt2
-rw-r--r--arch/x86/mm/pageattr.c17
-rw-r--r--arch/x86/oprofile/op_model_ppro.c4
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c39
-rw-r--r--arch/x86/platform/efi/efi.c39
-rw-r--r--arch/x86/platform/efi/efi_32.c5
-rw-r--r--arch/x86/platform/efi/efi_64.c137
-rw-r--r--arch/x86/platform/efi/efi_stub_64.S43
-rw-r--r--arch/x86/um/ldt.c7
-rw-r--r--crypto/Kconfig1
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/android/binder.c21
-rw-r--r--drivers/ata/Kconfig3
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/base/power/opp/core.c1
-rw-r--r--drivers/base/power/wakeirq.c7
-rw-r--r--drivers/block/rbd.c4
-rw-r--r--drivers/block/xen-blkback/blkback.c23
-rw-r--r--drivers/block/xen-blkback/common.h25
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/char/adsprpc.c249
-rw-r--r--drivers/char/adsprpc_compat.c12
-rw-r--r--drivers/char/adsprpc_shared.h29
-rw-r--r--drivers/char/diag/diagfwd_peripheral.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c10
-rw-r--r--drivers/clk/msm/Kconfig11
-rw-r--r--drivers/clk/msm/Makefile3
-rw-r--r--drivers/clk/msm/virtclk-front-8996.c551
-rw-r--r--drivers/clk/msm/virtclk-front.c460
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c3
-rw-r--r--drivers/cpufreq/Kconfig13
-rw-r--r--drivers/cpuidle/lpm-levels-of.c12
-rw-r--r--drivers/crypto/vmx/aes_ctr.c6
-rw-r--r--drivers/dma/dmatest.c1
-rw-r--r--drivers/dma/pl330.c19
-rw-r--r--drivers/dma/zx296702_dma.c1
-rw-r--r--drivers/edac/sb_edac.c1
-rw-r--r--drivers/esoc/esoc-mdm-4x.c8
-rw-r--r--drivers/esoc/esoc-mdm-pon.c30
-rw-r--r--drivers/esoc/esoc-mdm.h4
-rw-r--r--drivers/extcon/extcon-palmas.c5
-rw-r--r--drivers/firmware/efi/efi.c32
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c24
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c4
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c13
-rw-r--r--drivers/firmware/efi/libstub/efistub.h2
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c9
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c27
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c14
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c2
-rw-r--r--drivers/gpu/drm/msm-hyp/Kconfig15
-rw-r--r--drivers/gpu/drm/msm-hyp/Makefile4
-rw-r--r--drivers/gpu/drm/msm-hyp/NOTES8
-rw-r--r--drivers/gpu/drm/msm-hyp/msm_drv_hyp.c257
-rw-r--r--drivers/gpu/drm/msm-hyp/msm_drv_hyp.h81
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c71
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h16
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c15
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.c11
-rw-r--r--drivers/gpu/drm/msm/sde_power_handle.c7
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c1
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/i2c/busses/i2c-riic.c30
-rw-r--r--drivers/iio/light/cm3232.c2
-rw-r--r--drivers/iio/trigger/iio-trig-interrupt.c8
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c25
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c9
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c24
-rw-r--r--drivers/input/misc/ims-pcu.c16
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/tablet/gtco.c17
-rw-r--r--drivers/iommu/arm-smmu-v3.c10
-rw-r--r--drivers/leds/leds-qpnp-flash-v2.c60
-rw-r--r--drivers/md/bcache/alloc.c5
-rw-r--r--drivers/md/bcache/extents.c2
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/request.c9
-rw-r--r--drivers/md/dm-bufio.c15
-rw-r--r--drivers/md/dm.c12
-rw-r--r--drivers/media/i2c/Kconfig12
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7481.c171
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/i2c/tvtuner.c333
-rw-r--r--drivers/media/i2c/tvtuner.h23
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c8
-rw-r--r--drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c11
-rw-r--r--drivers/media/platform/msm/camera_v2/msm.c2
-rw-r--r--drivers/media/rc/imon.c5
-rw-r--r--drivers/media/rc/ir-lirc-codec.c9
-rw-r--r--drivers/media/usb/as102/as102_fw.c28
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c7
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c24
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c16
-rw-r--r--drivers/mfd/ab8500-sysctrl.c14
-rw-r--r--drivers/mfd/axp20x.c8
-rw-r--r--drivers/misc/eeprom/at24.c6
-rw-r--r--drivers/misc/mei/client.c3
-rw-r--r--drivers/misc/qseecom.c30
-rw-r--r--drivers/mmc/core/host.c19
-rw-r--r--drivers/mmc/host/s3cmci.c1
-rw-r--r--drivers/mtd/nand/nand_base.c9
-rw-r--r--drivers/net/appletalk/ipddp.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/c_can/c_can_pci.c1
-rw-r--r--drivers/net/can/c_can/c_can_platform.c1
-rw-r--r--drivers/net/can/spi/k61.c29
-rw-r--r--drivers/net/can/sun4i_can.c15
-rw-r--r--drivers/net/can/usb/kvaser_usb.c9
-rw-r--r--drivers/net/ethernet/3com/typhoon.c25
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c23
-rw-r--r--drivers/net/ethernet/fealnx.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c10
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c23
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c8
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c20
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c28
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c52
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h19
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c47
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c68
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c3
-rw-r--r--drivers/net/wireless/cnss2/main.c12
-rw-r--r--drivers/net/wireless/cnss2/pci.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c1
-rw-r--r--drivers/net/wireless/wcnss/wcnss_wlan.c83
-rw-r--r--drivers/net/xen-netback/netback.c6
-rw-r--r--drivers/net/xen-netfront.c28
-rw-r--r--drivers/nvdimm/label.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c2
-rw-r--r--drivers/nvme/host/pci.c2
-rw-r--r--drivers/pci/host/pci-mvebu.c101
-rw-r--r--drivers/pci/probe.c15
-rw-r--r--drivers/platform/msm/gpio-usbdetect.c12
-rw-r--r--drivers/platform/msm/gsi/gsi_dbg.c140
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_flt.c10
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c28
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c12
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c25
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c28
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c187
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c12
-rw-r--r--drivers/platform/x86/hp-wmi.c60
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c1
-rw-r--r--drivers/power/reset/msm-poweroff.c19
-rw-r--r--drivers/power/supply/qcom/fg-core.h15
-rw-r--r--drivers/power/supply/qcom/qpnp-fg-gen3.c234
-rw-r--r--drivers/power/supply/qcom/smb-lib.c4
-rw-r--r--drivers/power/supply/qcom/smb1351-charger.c4
-rw-r--r--drivers/pwm/pwm-qpnp.c9
-rw-r--r--drivers/regulator/fan53555.c5
-rw-r--r--drivers/s390/block/dasd.c7
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c21
-rw-r--r--drivers/s390/net/qeth_l2_main.c15
-rw-r--r--drivers/s390/net/qeth_l3_main.c15
-rw-r--r--drivers/s390/scsi/zfcp_aux.c5
-rw-r--r--drivers/s390/scsi/zfcp_erp.c18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c5
-rw-r--r--drivers/scsi/aacraid/aachba.c289
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c8
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c1
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.h1
-rw-r--r--drivers/soc/qcom/glink.c6
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c1
-rw-r--r--drivers/soc/qcom/hab/Makefile3
-rw-r--r--drivers/soc/qcom/hab/hab.c352
-rw-r--r--drivers/soc/qcom/hab/hab.h132
-rw-r--r--drivers/soc/qcom/hab/hab_mem_linux.c44
-rw-r--r--drivers/soc/qcom/hab/hab_mimex.c26
-rw-r--r--drivers/soc/qcom/hab/hab_msg.c100
-rw-r--r--drivers/soc/qcom/hab/hab_open.c7
-rw-r--r--drivers/soc/qcom/hab/hab_parser.c65
-rw-r--r--drivers/soc/qcom/hab/hab_pchan.c11
-rw-r--r--drivers/soc/qcom/hab/hab_qvm.c272
-rw-r--r--drivers/soc/qcom/hab/hab_qvm.h5
-rw-r--r--drivers/soc/qcom/hab/hab_vchan.c39
-rw-r--r--drivers/soc/qcom/hab/khab.c2
-rw-r--r--drivers/soc/qcom/hab/qvm_comm.c20
-rw-r--r--drivers/soc/qcom/icnss.c35
-rw-r--r--drivers/soc/qcom/msm_glink_pkt.c9
-rw-r--r--drivers/soc/qcom/peripheral-loader.c6
-rw-r--r--drivers/soc/qcom/pil-msa.c1
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr.c7
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_vm.c3
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c4
-rw-r--r--drivers/soc/qcom/rpm-smd-debug.c8
-rw-r--r--drivers/spi/Kconfig1
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi_qsd.c26
-rw-r--r--drivers/staging/android/ion/ion.c5
-rw-r--r--drivers/staging/android/ion/msm/msm_ion.c12
-rw-r--r--drivers/staging/iio/cdc/ad7150.c2
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h18
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c21
-rw-r--r--drivers/staging/panel/panel.c23
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h2
-rw-r--r--drivers/staging/rtl8712/ieee80211.h84
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c7
-rw-r--r--drivers/target/iscsi/iscsi_target.c27
-rw-r--r--drivers/target/target_core_tpg.c4
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/tee/Kconfig18
-rw-r--r--drivers/tee/Makefile5
-rw-r--r--drivers/tee/optee/Kconfig7
-rw-r--r--drivers/tee/optee/Makefile5
-rw-r--r--drivers/tee/optee/call.c444
-rw-r--r--drivers/tee/optee/core.c622
-rw-r--r--drivers/tee/optee/optee_msg.h418
-rw-r--r--drivers/tee/optee/optee_private.h183
-rw-r--r--drivers/tee/optee/optee_smc.h450
-rw-r--r--drivers/tee/optee/rpc.c396
-rw-r--r--drivers/tee/optee/supp.c273
-rw-r--r--drivers/tee/tee_core.c893
-rw-r--r--drivers/tee/tee_private.h129
-rw-r--r--drivers/tee/tee_shm.c358
-rw-r--r--drivers/tee/tee_shm_pool.c156
-rw-r--r--drivers/thermal/msm_lmh_dcvs.c39
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c3
-rw-r--r--drivers/tty/serial/8250/8250_port.c5
-rw-r--r--drivers/tty/serial/msm_serial_hs.c71
-rw-r--r--drivers/tty/serial/omap-serial.c2
-rw-r--r--drivers/tty/serial/sh-sci.c17
-rw-r--r--drivers/tty/sysrq.c9
-rw-r--r--drivers/usb/core/config.c32
-rw-r--r--drivers/usb/core/devio.c70
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c9
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c5
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_gsi.h2
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c54
-rw-r--r--drivers/usb/host/ehci-dbg.c2
-rw-r--r--drivers/usb/host/xhci-hub.c14
-rw-r--r--drivers/usb/host/xhci-mem.c7
-rw-r--r--drivers/usb/misc/usbtest.c5
-rw-r--r--drivers/usb/pd/policy_engine.c57
-rw-r--r--drivers/usb/pd/qpnp-pdphy.c15
-rw-r--r--drivers/usb/phy/phy-tahvo.c3
-rw-r--r--drivers/usb/serial/garmin_gps.c22
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/uas-detect.h4
-rw-r--r--drivers/vhost/scsi.c5
-rw-r--r--drivers/video/backlight/adp5520_bl.c12
-rw-r--r--drivers/video/backlight/lcd.c4
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_video.c15
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c2
-rw-r--r--drivers/video/msm/ba/msm_ba.c19
-rw-r--r--drivers/video/msm/ba/msm_ba_common.c2
-rw-r--r--drivers/video/msm/ba/msm_ba_internal.h1
-rw-r--r--drivers/video/msm/ba/msm_v4l2_ba.c9
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/manage.c12
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/9p/vfs_inode.c3
-rw-r--r--fs/9p/vfs_inode_dotl.c3
-rw-r--r--fs/autofs4/waitq.c15
-rw-r--r--fs/btrfs/extent-tree.c14
-rw-r--r--fs/btrfs/uuid-tree.c4
-rw-r--r--fs/ceph/caps.c5
-rw-r--r--fs/cifs/dir.c5
-rw-r--r--fs/coda/upcall.c3
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h24
-rw-r--r--fs/ecryptfs/keystore.c9
-rw-r--r--fs/ecryptfs/messaging.c7
-rw-r--r--fs/ext4/crypto_key.c8
-rw-r--r--fs/ext4/extents.c6
-rw-r--r--fs/ext4/mballoc.c6
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/f2fs/acl.c3
-rw-r--r--fs/f2fs/checkpoint.c64
-rw-r--r--fs/f2fs/data.c38
-rw-r--r--fs/f2fs/debug.c31
-rw-r--r--fs/f2fs/dir.c32
-rw-r--r--fs/f2fs/f2fs.h223
-rw-r--r--fs/f2fs/file.c120
-rw-r--r--fs/f2fs/gc.c37
-rw-r--r--fs/f2fs/inline.c1
-rw-r--r--fs/f2fs/inode.c26
-rw-r--r--fs/f2fs/namei.c101
-rw-r--r--fs/f2fs/node.c410
-rw-r--r--fs/f2fs/node.h16
-rw-r--r--fs/f2fs/recovery.c8
-rw-r--r--fs/f2fs/segment.c512
-rw-r--r--fs/f2fs/segment.h39
-rw-r--r--fs/f2fs/shrinker.c2
-rw-r--r--fs/f2fs/super.c219
-rw-r--r--fs/f2fs/sysfs.c53
-rw-r--r--fs/f2fs/xattr.c174
-rw-r--r--fs/fuse/dir.c3
-rw-r--r--fs/isofs/isofs.h2
-rw-r--r--fs/isofs/rock.h2
-rw-r--r--fs/isofs/util.c2
-rw-r--r--fs/nfs/dir.c3
-rw-r--r--fs/nfs/nfs4proc.c21
-rw-r--r--fs/nfs/nfs4state.c1
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfsd/nfs4state.c139
-rw-r--r--fs/nilfs2/segment.c6
-rw-r--r--fs/ocfs2/alloc.c24
-rw-r--r--fs/sdcardfs/inode.c2
-rw-r--r--include/dt-bindings/pinctrl/omap.h4
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/clk/msm-clock-generic.h14
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/f2fs_fs.h10
-rw-r--r--include/linux/input/qpnp-power-on.h5
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mmzone.h3
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/phy.h8
-rw-r--r--include/linux/preempt.h21
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/tee_drv.h277
-rw-r--r--include/linux/timekeeper_internal.h8
-rw-r--r--include/linux/usb.h1
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/media/adv7481.h3
-rw-r--r--include/media/msm_ba.h1
-rw-r--r--include/net/genetlink.h2
-rw-r--r--include/net/inet_sock.h8
-rw-r--r--include/net/tcp.h6
-rw-r--r--include/soc/qcom/icnss.h1
-rw-r--r--include/sound/apr_audio-v2.h1025
-rw-r--r--include/sound/pcm.h3
-rw-r--r--include/sound/q6adm-v2.h21
-rw-r--r--include/sound/q6asm-v2.h11
-rw-r--r--include/sound/q6common.h23
-rw-r--r--include/sound/q6lsm.h120
-rw-r--r--include/sound/seq_kernel.h3
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/f2fs.h116
-rw-r--r--include/trace/events/sched.h12
-rw-r--r--include/trace/events/sunrpc.h17
-rw-r--r--include/uapi/drm/drm_mode.h19
-rw-r--r--include/uapi/drm/msm_drm.h15
-rw-r--r--include/uapi/linux/bcache.h2
-rw-r--r--include/uapi/linux/habmm.h21
-rw-r--r--include/uapi/linux/msm_ipa.h2
-rw-r--r--include/uapi/linux/rds.h102
-rw-r--r--include/uapi/linux/spi/spidev.h1
-rw-r--r--include/uapi/linux/tee.h346
-rw-r--r--include/uapi/linux/usb/ch9.h20
-rw-r--r--include/uapi/media/Kbuild1
-rw-r--r--include/uapi/media/msm_ba.h35
-rw-r--r--include/uapi/sound/audio_effects.h6
-rw-r--r--init/initramfs.c5
-rw-r--r--kernel/sched/Makefile1
-rw-r--r--kernel/sched/core.c128
-rw-r--r--kernel/sched/cpufreq_sched.c525
-rw-r--r--kernel/sched/fair.c120
-rw-r--r--kernel/sched/hmp.c7
-rw-r--r--kernel/sched/rt.c289
-rw-r--r--kernel/sched/sched.h84
-rw-r--r--kernel/sysctl.c7
-rw-r--r--kernel/time/timekeeping.c57
-rw-r--r--kernel/workqueue.c37
-rw-r--r--kernel/workqueue_internal.h3
-rw-r--r--lib/asn1_decoder.c7
-rw-r--r--lib/assoc_array.c51
-rw-r--r--lib/mpi/mpi-pow.c2
-rw-r--r--lib/test_firmware.c11
-rw-r--r--mm/huge_memory.c14
-rw-r--r--mm/madvise.c4
-rw-r--r--mm/page_alloc.c27
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/pagewalk.c6
-rw-r--r--net/8021q/vlan.c6
-rw-r--r--net/9p/client.c3
-rw-r--r--net/9p/trans_virtio.c13
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/dccp/ipv4.c13
-rw-r--r--net/dsa/Kconfig5
-rw-r--r--net/ipv4/ah4.c3
-rw-r--r--net/ipv4/cipso_ipv4.c24
-rw-r--r--net/ipv4/inet_connection_sock.c9
-rw-r--r--net/ipv4/ip_sockglue.c7
-rw-r--r--net/ipv4/ipip.c58
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_ipv4.c21
-rw-r--r--net/ipv4/tcp_output.c12
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c11
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_vti.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c16
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/key.c53
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/mesh_plink.c14
-rw-r--r--net/mac80211/mesh_sync.c11
-rw-r--r--net/netfilter/nf_tables_api.c2
-rw-r--r--net/netfilter/nft_meta.c28
-rw-r--r--net/netfilter/nft_queue.c2
-rw-r--r--net/netlink/af_netlink.c21
-rw-r--r--net/netlink/af_netlink.h1
-rw-r--r--net/netlink/genetlink.c16
-rw-r--r--net/nfc/core.c2
-rw-r--r--net/packet/af_packet.c24
-rw-r--r--net/rds/send.c11
-rw-r--r--net/sctp/debug.c2
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/socket.c36
-rw-r--r--net/tipc/link.c28
-rw-r--r--net/tipc/link.h1
-rw-r--r--net/tipc/server.c4
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/vmw_vsock/af_vsock.c167
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c8
-rw-r--r--net/xfrm/xfrm_user.c25
-rw-r--r--samples/trace_events/trace-events-sample.c14
-rw-r--r--security/integrity/ima/ima_appraise.c3
-rw-r--r--security/integrity/ima/ima_main.c4
-rw-r--r--security/keys/Kconfig4
-rw-r--r--security/keys/keyring.c39
-rw-r--r--security/keys/trusted.c71
-rw-r--r--sound/core/pcm.c4
-rw-r--r--sound/core/pcm_lib.c8
-rw-r--r--sound/core/pcm_native.c8
-rw-r--r--sound/core/pcm_timer.c11
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c4
-rw-r--r--sound/core/seq/oss/seq_oss_readq.c29
-rw-r--r--sound/core/seq/oss/seq_oss_readq.h2
-rw-r--r--sound/core/seq/seq_clientmgr.c2
-rw-r--r--sound/core/seq/seq_device.c3
-rw-r--r--sound/core/timer.c220
-rw-r--r--sound/core/timer_compat.c29
-rw-r--r--sound/drivers/vx/vx_pcm.c8
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_realtek.c33
-rw-r--r--sound/pci/vx222/vx222_ops.c12
-rw-r--r--sound/pcmcia/vx/vxp_ops.c12
-rw-r--r--sound/soc/codecs/adau17x1.c24
-rw-r--r--sound/soc/codecs/adau17x1.h2
-rw-r--r--sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c6
-rw-r--r--sound/soc/codecs/wm_adsp.c25
-rw-r--r--sound/soc/msm/msm8996.c4970
-rw-r--r--sound/soc/msm/qdsp6v2/Makefile2
-rw-r--r--sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c1236
-rw-r--r--sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c21
-rw-r--r--sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c525
-rw-r--r--sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h32
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c5
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c69
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c6
-rw-r--r--sound/soc/msm/qdsp6v2/msm-qti-pp-config.c109
-rw-r--r--sound/soc/msm/qdsp6v2/q6adm.c2301
-rw-r--r--sound/soc/msm/qdsp6v2/q6afe.c2990
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c1268
-rw-r--r--sound/soc/msm/qdsp6v2/q6common.c85
-rw-r--r--sound/soc/msm/qdsp6v2/q6lsm.c914
-rw-r--r--sound/soc/msm/qdsp6v2/q6voice.c464
-rw-r--r--sound/soc/msm/qdsp6v2/q6voice.h85
-rw-r--r--sound/soc/msm/qdsp6v2/rtac.c514
-rw-r--r--sound/soc/msm/sdm660-common.h3
-rw-r--r--sound/soc/msm/sdm660-internal.c52
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/usb/clock.c9
-rw-r--r--sound/usb/mixer.c15
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build4
-rw-r--r--tools/perf/tests/attr.c2
-rw-r--r--tools/perf/util/parse-events.c3
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh10
-rwxr-xr-xtools/testing/selftests/firmware/fw_userhelper.sh28
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c17
-rw-r--r--virt/kvm/arm/arch_timer.c3
687 files changed, 25628 insertions, 11620 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index e39561d41f8b..44ad7a310c7d 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -437,6 +437,8 @@ sysrq.txt
- info on the magic SysRq key.
target/
- directory with info on generating TCM v4 fabric .ko modules
+tee.txt
+ - info on the TEE subsystem and drivers
this_cpu_ops.txt
- List rationale behind and the way to use this_cpu operations.
thermal/
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 500c60403653..2baed1151eac 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -51,6 +51,18 @@ Description:
Controls the dirty page count condition for the in-place-update
policies.
+What: /sys/fs/f2fs/<disk>/min_hot_blocks
+Date: March 2017
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Controls the dirty page count condition for redefining hot data.
+
+What: /sys/fs/f2fs/<disk>/min_ssr_sections
+Date: October 2017
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:
+ Controls the free section threshold to trigger SSR allocation.
+
What: /sys/fs/f2fs/<disk>/max_small_discards
Date: November 2013
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
@@ -96,6 +108,18 @@ Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
Controls the checkpoint timing.
+What: /sys/fs/f2fs/<disk>/idle_interval
+Date: January 2016
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Controls the idle timing.
+
+What: /sys/fs/f2fs/<disk>/iostat_enable
+Date: August 2017
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:
+ Controls to enable/disable IO stat.
+
What: /sys/fs/f2fs/<disk>/ra_nid_pages
Date: October 2015
Contact: "Chao Yu" <chao2.yu@samsung.com>
@@ -116,6 +140,12 @@ Contact: "Shuoran Liu" <liushuoran@huawei.com>
Description:
Shows total written kbytes issued to disk.
+What: /sys/fs/f2fs/<disk>/feature
+Date: July 2017
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Shows all enabled features in current device.
+
What: /sys/fs/f2fs/<disk>/inject_rate
Date: May 2016
Contact: "Sheng Yong" <shengyong1@huawei.com>
@@ -132,7 +162,18 @@ What: /sys/fs/f2fs/<disk>/reserved_blocks
Date: June 2017
Contact: "Chao Yu" <yuchao0@huawei.com>
Description:
- Controls current reserved blocks in system.
+ Controls target reserved blocks in system. The threshold
+ is soft; it could exceed currently available user space.
+
+What: /sys/fs/f2fs/<disk>/current_reserved_blocks
+Date: October 2017
+Contact: "Yunlong Song" <yunlong.song@huawei.com>
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:
+ Shows current reserved blocks in system, it may be temporarily
+ smaller than target_reserved_blocks, but will gradually
+ increase to target_reserved_blocks when more free blocks are
+ freed by user later.
What: /sys/fs/f2fs/<disk>/gc_urgent
Date: August 2017
diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
new file mode 100644
index 000000000000..d38834c67dff
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
@@ -0,0 +1,31 @@
+OP-TEE Device Tree Bindings
+
+OP-TEE is a piece of software using hardware features to provide a Trusted
+Execution Environment. The security can be provided with ARM TrustZone, but
+also by virtualization or a separate chip.
+
+We're using "linaro" as the first part of the compatible property for
+the reference implementation maintained by Linaro.
+
+* OP-TEE based on ARM TrustZone required properties:
+
+- compatible : should contain "linaro,optee-tz"
+
+- method : The method of calling the OP-TEE Trusted OS. Permitted
+ values are:
+
+ "smc" : SMC #0, with the register assignments specified
+ in drivers/tee/optee/optee_smc.h
+
+ "hvc" : HVC #0, with the register assignments specified
+ in drivers/tee/optee/optee_smc.h
+
+
+
+Example:
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/adv7481.txt b/Documentation/devicetree/bindings/arm/msm/adv7481.txt
index 974c0877ac30..d09a83cc0d35 100644
--- a/Documentation/devicetree/bindings/arm/msm/adv7481.txt
+++ b/Documentation/devicetree/bindings/arm/msm/adv7481.txt
@@ -15,6 +15,12 @@ Required properties
interrupt 1, interrupt 2 and interrupt 3.
- cam_vdig-supply: Should contain regulator to be used for the digital
vdd.
+- tx-lanes: Should contain array of csi transmission lanes required
+ to select csi lane by adv7481 driver.
+- settle-count: Should contain array of csi settle count required
+ to select settle count by adv7481 driver.
+- res-array: Should contain array of resolution supported by
+ adv7481 driver.
- cam_vio-supply: Should contain regulator to be used for the IO vdd.
- cam_vana-supply: Should contain regulator from which analog voltage
is supplied.
@@ -35,6 +41,9 @@ Example:
compatible = "qcom,adv7481";
reg = <0x70 0xff>;
cam_vdig-supply = <&vph_pwr_vreg>;
+ tx-lanes = <4 2 1>;
+ settle-count = <16 16 16>;
+ res-array = "RES_1080P", "RES_720P", "RES_576P_480P";
/* Cameras powered by PMIC: */
cam_vio-supply = <&pm8994_lvs1>;
cam_vana-supply = <&pm8994_l17>;
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
index 7d5e8a1c910a..1000992b3341 100644
--- a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -114,6 +114,15 @@ Optional driver parameters:
- qcom,mdm-statusline-not-a-powersource: Boolean. If set, status line to esoc device is not a
power source.
- qcom,mdm-userspace-handle-shutdown: Boolean. If set, userspace handles shutdown requests.
+- qcom,shutdown-timeout-ms: graceful shutdown timeout in milliseconds.
+ This interval is the time needed for the external modem to gracefully shutdown
+ after the host sends a shutdown command. The value depends on how long it takes
+ for the high level OS in the external modem to shutdown gracefully. The default
+ value is 10000 milliseconds.
+- qcom,reset-time-ms: time it takes for the external modem to forcefully reset in milliseconds.
+ This interval is the time it takes to toggle the reset of an external modem by
+ holding down the reset pin. The value depends on the external modem's power
+ management boot options. The default value is 203 milliseconds.
Example:
mdm0: qcom,mdm0 {
diff --git a/Documentation/devicetree/bindings/arm/msm/tv-tuner.txt b/Documentation/devicetree/bindings/arm/msm/tv-tuner.txt
new file mode 100644
index 000000000000..480cdfd733e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/tv-tuner.txt
@@ -0,0 +1,15 @@
+TVTUNER driver (VIDEO_TVTUNER)
+
+VIDEO_TVTUNER is a sample kernel platform driver that is used to control the TV
+tuner hardware for the capture of the A/V signal received by the TV tuner.
+
+The devicetree representation of the VIDEO_TVTUNER block should be:
+
+Required properties
+
+- compatible: "qcom,tv-tuner"
+
+Example:
+ qcom,tv-tuner {
+ compatible = "qcom,tv-tuner";
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,virtclk-front.txt b/Documentation/devicetree/bindings/clock/qcom,virtclk-front.txt
new file mode 100644
index 000000000000..a863c802120a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,virtclk-front.txt
@@ -0,0 +1,14 @@
+QTI Virtual Clock Frontend Binding
+------------------------------------------------
+
+Required properties :
+- compatible : shall contain:
+ "qcom,virtclk-frontend-8996"
+
+- #clock-cells : shall contain 1
+
+Example:
+ virtclk-frontend@0 {
+ compatible = "qcom,virtclk-frontend-8996";
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
deleted file mode 100644
index 6ec1a880ac18..000000000000
--- a/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-THS8135 Video DAC
------------------
-
-This is the binding for Texas Instruments THS8135 Video DAC bridge.
-
-Required properties:
-
-- compatible: Must be "ti,ths8135"
-
-Required nodes:
-
-This device has two video ports. Their connections are modelled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for RGB input
-- Video port 1 for VGA output
-
-Example
--------
-
-vga-bridge {
- compatible = "ti,ths8135";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- vga_bridge_in: endpoint {
- remote-endpoint = <&lcdc_out_vga>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- vga_bridge_out: endpoint {
- remote-endpoint = <&vga_con_in>;
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/display/msm/sde-hyp.txt b/Documentation/devicetree/bindings/display/msm/sde-hyp.txt
new file mode 100644
index 000000000000..6e60822ad95b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/sde-hyp.txt
@@ -0,0 +1,13 @@
+Qualcomm Technologies, Inc. SDE KMS HYP
+
+Snapdragon Display Engine registers with the Linux DRM/KMS framework to
+facilitate DRM driver creation, publishing /dev/dri/card0, and sending
+VBlank and Page Flip events to User Space listeners.
+
+Required properties
+- compatible: Must be "qcom,sde-kms-hyp"
+
+Example:
+ sde_kms_hyp: qcom,sde_kms_hyp@900000 {
+ compatible = "qcom,sde-kms-hyp";
+ };
diff --git a/Documentation/devicetree/bindings/media/video/msm-ba.txt b/Documentation/devicetree/bindings/media/video/msm-ba.txt
index 9a6fe4d7e8ae..462d69cf9801 100644
--- a/Documentation/devicetree/bindings/media/video/msm-ba.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-ba.txt
@@ -12,7 +12,7 @@ Required properties:
"qcom,name", "qcom,ba-input", "qcom,ba-output", "qcom,sd-name",
"qcom,ba-node" and "qcom,user-type".
Required properties:
-- qcom,type: Input type such as CVBS(0), HDMI(4) etc as defined in BA driver.
+- qcom,type: Input type such as CVBS(0), HDMI(4), TUNER(8) etc as defined in BA driver.
This property is of type u32.
- qcom,name: Name of the input type. This property is of type string.
- qcom,ba-input: BA input id supported by a bridge chip for this profile.
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 4207b1f0615a..dc967edb6192 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -359,7 +359,7 @@ First Level Node - FG Gen3 device
Value type: <u32>
Definition: Value in micro percentage for low temperature ESR tight
filter. If this is not specified, then a default value of
- 48829 (4.88 %) will be used. Lowest possible value is 1954
+ 30000 (3 %) will be used. Lowest possible value is 1954
(0.19 %).
- qcom,fg-esr-broad-lt-filter-micro-pct
@@ -367,9 +367,35 @@ First Level Node - FG Gen3 device
Value type: <u32>
Definition: Value in micro percentage for low temperature ESR broad
filter. If this is not specified, then a default value of
- 148438 (14.84 %) will be used. Lowest possible value is
+ 30000 (3 %) will be used. Lowest possible value is
1954 (0.19 %).
+- qcom,fg-esr-rt-filter-switch-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery temperature threshold below which ESR relax
+ filter coefficients will be applied after a certain
+ number of delta battery temperature interrupts firing in
+ an interval of time. This will be applied only when Qnovo
+ is enabled. If this is not specified, then the default
+ value used will be -100. Unit is in decidegC.
+
+- qcom,fg-esr-tight-rt-filter-micro-pct
+ Usage: optional
+ Value type: <u32>
+ Definition: Value in micro percentage for relax temperature ESR tight
+ filter. If this is not specified, then a default value of
+ 5860 will be used. Lowest possible value is 1954 (0.19 %).
+ This will be applied only if Qnovo is enabled.
+
+- qcom,fg-esr-broad-rt-filter-micro-pct
+ Usage: optional
+ Value type: <u32>
+ Definition: Value in micro percentage for relax temperature ESR broad
+ filter. If this is not specified, then a default value of
+ 156250 will be used. Lowest possible value is 1954 (0.19 %).
+ This will be applied only if Qnovo is enabled.
+
- qcom,fg-auto-recharge-soc
Usage: optional
Value type: <empty>
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 7820562d17ae..b6d0c9affa0e 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1408,6 +1408,8 @@ Optional properties:
- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
+- qcom,tdm-audio-intf : Boolean. This property is used to specify whether TDM interface
+ is supported or not to the machine driver.
Example:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 6cca6f49c194..f9097941c192 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -133,6 +133,7 @@ lacie LaCie
lantiq Lantiq Semiconductor
lenovo Lenovo Group Ltd.
lg LG Corporation
+linaro Linaro Limited
linux Linux-specific binding
lsi LSI Corp. (LSI Logic)
lltc Linear Technology Corporation
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 91261a32a573..b5ce7b6c3576 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -307,6 +307,7 @@ Code Seq#(hex) Include File Comments
0xA3 80-8F Port ACL in development:
<mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h
+0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem
0xAA 00-3F linux/uapi/linux/userfaultfd.h
0xAB 00-1F linux/nbd.h
0xAC 00-1F linux/raw.h
diff --git a/Documentation/tee.txt b/Documentation/tee.txt
new file mode 100644
index 000000000000..718599357596
--- /dev/null
+++ b/Documentation/tee.txt
@@ -0,0 +1,118 @@
+TEE subsystem
+This document describes the TEE subsystem in Linux.
+
+A TEE (Trusted Execution Environment) is a trusted OS running in some
+secure environment, for example, TrustZone on ARM CPUs, or a separate
+secure co-processor etc. A TEE driver handles the details needed to
+communicate with the TEE.
+
+This subsystem deals with:
+
+- Registration of TEE drivers
+
+- Managing shared memory between Linux and the TEE
+
+- Providing a generic API to the TEE
+
+The TEE interface
+=================
+
+include/uapi/linux/tee.h defines the generic interface to a TEE.
+
+User space (the client) connects to the driver by opening /dev/tee[0-9]* or
+/dev/teepriv[0-9]*.
+
+- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor
+ which user space can mmap. When user space doesn't need the file
+ descriptor any more, it should be closed. When shared memory isn't needed
+ any longer it should be unmapped with munmap() to allow the reuse of
+ memory.
+
+- TEE_IOC_VERSION lets user space know which TEE this driver handles and
+  its capabilities.
+
+- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application.
+
+- TEE_IOC_INVOKE invokes a function in a Trusted Application.
+
+- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE.
+
+- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application.
+
+There are two classes of clients, normal clients and supplicants. The latter is
+a helper process for the TEE to access resources in Linux, for example file
+system access. A normal client opens /dev/tee[0-9]* and a supplicant opens
+/dev/teepriv[0-9].
+
+Much of the communication between clients and the TEE is opaque to the
+driver. The main job for the driver is to receive requests from the
+clients, forward them to the TEE and send back the results. In the case of
+supplicants the communication goes in the other direction, the TEE sends
+requests to the supplicant which then sends back the result.
+
+OP-TEE driver
+=============
+
+The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM
+TrustZone based OP-TEE solution that is supported.
+
+Lowest level of communication with OP-TEE builds on ARM SMC Calling
+Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface
+[3] used internally by the driver. Stacked on top of that is OP-TEE Message
+Protocol [4].
+
+OP-TEE SMC interface provides the basic functions required by SMCCC and some
+additional functions specific for OP-TEE. The most interesting functions are:
+
+- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information
+ which is then returned by TEE_IOC_VERSION
+
+- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used
+ to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a
+ separate secure co-processor.
+
+- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol
+
+- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory
+  range to use for shared memory between Linux and OP-TEE.
+
+The GlobalPlatform TEE Client API [5] is implemented on top of the generic
+TEE API.
+
+Picture of the relationship between the different components in the
+OP-TEE architecture.
+
+ User space Kernel Secure world
+ ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~
+ +--------+ +-------------+
+ | Client | | Trusted |
+ +--------+ | Application |
+ /\ +-------------+
+ || +----------+ /\
+ || |tee- | ||
+ || |supplicant| \/
+ || +----------+ +-------------+
+ \/ /\ | TEE Internal|
+ +-------+ || | API |
+ + TEE | || +--------+--------+ +-------------+
+ | Client| || | TEE | OP-TEE | | OP-TEE |
+ | API | \/ | subsys | driver | | Trusted OS |
+ +-------+----------------+----+-------+----+-----------+-------------+
+ | Generic TEE API | | OP-TEE MSG |
+ | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) |
+ +-----------------------------+ +------------------------------+
+
+RPC (Remote Procedure Call) messages are requests from secure world to kernel driver
+or tee-supplicant. An RPC is identified by a special range of SMCCC return
+values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the
+kernel are handled by the kernel driver. Other RPC messages will be forwarded to
+tee-supplicant without further involvement of the driver, except switching
+shared memory buffer representation.
+
+References:
+[1] https://github.com/OP-TEE/optee_os
+[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+[3] drivers/tee/optee/optee_smc.h
+[4] drivers/tee/optee/optee_msg.h
+[5] http://www.globalplatform.org/specificationsdevice.asp look for
+ "TEE Client API Specification v1.0" and click download.
diff --git a/MAINTAINERS b/MAINTAINERS
index 167a1a751339..c34c64ce4c6f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7955,6 +7955,11 @@ F: arch/*/oprofile/
F: drivers/oprofile/
F: include/linux/oprofile.h
+OP-TEE DRIVER
+M: Jens Wiklander <jens.wiklander@linaro.org>
+S: Maintained
+F: drivers/tee/optee/
+
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mfasheh@suse.com>
M: Joel Becker <jlbec@evilplan.org>
@@ -9382,6 +9387,14 @@ F: drivers/hwtracing/stm/
F: include/linux/stm.h
F: include/uapi/linux/stm.h
+TEE SUBSYSTEM
+M: Jens Wiklander <jens.wiklander@linaro.org>
+S: Maintained
+F: include/linux/tee_drv.h
+F: include/uapi/linux/tee.h
+F: drivers/tee/
+F: Documentation/tee.txt
+
THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
S: Maintained
diff --git a/Makefile b/Makefile
index c16d200334ef..aec9774cc7e9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 95
+SUBLEVEL = 105
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -700,11 +700,11 @@ KBUILD_CFLAGS += $(stackp-flag)
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TRIPLE ?= $(CROSS_COMPILE)
-CLANG_TARGET := -target $(notdir $(CLANG_TRIPLE:%-=%))
+CLANG_TARGET := --target=$(notdir $(CLANG_TRIPLE:%-=%))
GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
+CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index d23e2524d694..be9c37e89be1 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -142,10 +142,11 @@
};
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x800>;
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0 0x800>;
scm_clocks: clocks {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index cc952cf8ec30..024f1b75b0a3 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -176,9 +176,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index e8b7f6726772..bf20918f1fad 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -143,9 +143,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index dc6efd386dbc..e67f1fd7a4d1 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -104,9 +104,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 7988b42e5764..c226c3d952d8 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -138,7 +138,7 @@
};
uart1: uart@20000 {
- compatible = "ti,omap3-uart";
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
ti,hwmods = "uart1";
reg = <0x20000 0x2000>;
clock-frequency = <48000000>;
@@ -148,7 +148,7 @@
};
uart2: uart@22000 {
- compatible = "ti,omap3-uart";
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
ti,hwmods = "uart2";
reg = <0x22000 0x2000>;
clock-frequency = <48000000>;
@@ -158,7 +158,7 @@
};
uart3: uart@24000 {
- compatible = "ti,omap3-uart";
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
ti,hwmods = "uart3";
reg = <0x24000 0x2000>;
clock-frequency = <48000000>;
@@ -189,10 +189,11 @@
ranges = <0 0x160000 0x16d000>;
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x800>;
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0 0x800>;
scm_clocks: clocks {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index eee636de4cd8..e526928e6e96 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -347,7 +347,7 @@
};
uart1: uart@48020000 {
- compatible = "ti,omap3-uart";
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
ti,hwmods = "uart1";
reg = <0x48020000 0x2000>;
clock-frequency = <48000000>;
@@ -357,7 +357,7 @@
};
uart2: uart@48022000 {
- compatible = "ti,omap3-uart";
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
ti,hwmods = "uart2";
reg = <0x48022000 0x2000>;
clock-frequency = <48000000>;
@@ -367,7 +367,7 @@
};
uart3: uart@48024000 {
- compatible = "ti,omap3-uart";
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
ti,hwmods = "uart3";
reg = <0x48024000 0x2000>;
clock-frequency = <48000000>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 5b0430041ec6..fec92cd36ae3 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -88,7 +88,7 @@
interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins &mmc1_cd>;
- cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>; /* gpio127 */
+ cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio127 */
vmmc-supply = <&vmmc1>;
bus-width = <4>;
cap-power-off-card;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-auto-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-auto-cdp.dts
index e59003f2d316..0ef3d4287c45 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-auto-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-auto-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,7 +38,7 @@
compatible = "renesas,rh850";
reg = <0>;
interrupt-parent = <&tlmm>;
- interrupts = <127 0>;
+ interrupts = <122 0>;
spi-max-frequency = <5000000>;
};
};
diff --git a/arch/arm/boot/dts/qcom/apq8096pro-auto-cdp.dts b/arch/arm/boot/dts/qcom/apq8096pro-auto-cdp.dts
index d438bbe828ed..1bca588f448e 100644
--- a/arch/arm/boot/dts/qcom/apq8096pro-auto-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096pro-auto-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,7 +30,7 @@
compatible = "renesas,rh850";
reg = <0>;
interrupt-parent = <&tlmm>;
- interrupts = <127 0>;
+ interrupts = <122 0>;
spi-max-frequency = <5000000>;
};
};
diff --git a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-cdp.dts b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-cdp.dts
index ecde7c667f9a..e69640ddd363 100644
--- a/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096pro-v1.1-auto-cdp.dts
@@ -30,7 +30,7 @@
compatible = "renesas,rh850";
reg = <0>;
interrupt-parent = <&tlmm>;
- interrupts = <127 0>;
+ interrupts = <122 0>;
spi-max-frequency = <5000000>;
};
};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-lgd-incell-sw49106-fhd-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-lgd-incell-sw49106-fhd-video.dtsi
new file mode 100644
index 000000000000..8db5317f2106
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/dsi-panel-lgd-incell-sw49106-fhd-video.dtsi
@@ -0,0 +1,115 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_lgd_incell_sw49106_fhd_video:
+ qcom,mdss_dsi_lgd_incell_sw49106_fhd_video {
+ qcom,mdss-dsi-panel-name =
+ "lgd incell sw49106 fhd video";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <2160>;
+ qcom,mdss-dsi-h-front-porch = <8>;
+ qcom,mdss-dsi-h-back-porch = <8>;
+ qcom,mdss-dsi-h-pulse-width = <4>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <92>;
+ qcom,mdss-dsi-v-front-porch = <170>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings = [F8 3C 28 00 6E 72 2E
+ 40 30 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x02>;
+ qcom,mdss-dsi-t-clk-pre = <0x2D>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-on-command = [05 01 00 00 0B 00 02 35 00
+ 15 01 00 00 00 00 02 36 00
+ 15 01 00 00 00 00 02 51 FF
+ 15 01 00 00 00 00 02 53 24
+ 15 01 00 00 00 00 02 55 80
+ 39 01 00 00 00 00 02 B0 AC
+ 39 01 00 00 00 00 06 B1 46 00 80 14 85
+ 39 01 00 00 00 00 08 B3 05 08 14 00 1C 00 02
+ 39 01 00 00 00 00 10 B4 83 08 00 04 04 04 04 00
+ 00 00 00 00 00 00 00
+ 39 01 00 00 00 00 13 B5 03 1E 0B 02 29 00 00 00
+ 00 04 00 24 00 10 10 10 10 00
+ 39 01 00 00 00 00 0A B6 00 72 39 13 08 67 00 60 46
+ 39 01 00 00 00 00 05 B7 00 50 37 04
+ 39 01 00 00 00 00 0C B8 70 38 14 ED 08 04 00 01
+ 0A A0 00
+ 39 01 00 00 00 00 06 C0 8A 8F 18 C1 12
+ 39 01 00 00 00 00 07 C1 01 00 30 C2 C7 0F
+ 39 01 00 00 00 00 03 C2 2A 00
+ 39 01 00 00 00 00 07 C3 05 0E 0E 50 88 09
+ 39 01 00 00 00 00 04 C4 A2 E8 F4
+ 39 01 00 00 00 00 05 C5 C2 2A 4E 08
+ 39 01 00 00 00 00 03 C6 15 01
+ 39 01 00 00 00 00 07 CA 00 00 03 84 55 F5
+ 39 01 00 00 00 00 03 CB 3F A0
+ 39 01 00 00 00 00 09 CC F0 03 10 55 11 FC 34 34
+ 39 01 00 00 00 00 07 CD 11 50 50 90 00 F3
+ 39 01 00 00 00 00 07 CE A0 28 28 34 00 AB
+ 39 01 00 00 00 00 10 D0 10 1B 22 2A 35 42 4A 53 4D
+ 44 34 23 10 03 81
+ 39 01 00 00 00 00 10 D1 09 15 1C 25 31 3F 47 52 4F
+ 45 34 22 0E 01 83
+ 39 01 00 00 00 00 10 D2 10 1B 22 29 34 41 49 52 4E
+ 44 34 23 10 03 81
+ 39 01 00 00 00 00 10 D3 09 15 1C 24 30 3E 46 51 50
+ 45 34 22 0E 01 83
+ 39 01 00 00 00 00 10 D4 10 1B 22 2A 35 42 4A 53 4D
+ 44 34 23 10 03 81
+ 39 01 00 00 00 00 10 D5 09 15 1C 25 31 3F 47 52 4F
+ 45 34 22 0E 01 83
+ 39 01 00 00 00 00 0D E5 24 23 11 10 00 0A 08 06 04
+ 11 0E 23
+ 39 01 00 00 00 00 0D E6 24 23 11 10 01 0B 09 07 05
+ 11 0E 23
+ 39 01 00 00 00 00 07 E7 15 16 17 18 19 1A
+ 39 01 00 00 00 00 07 E8 1B 1C 1D 1E 1F 20
+ 39 01 00 00 00 00 05 ED 00 01 53 0C
+ 39 01 00 00 00 00 03 F0 B2 00
+ 39 01 00 00 00 00 05 F2 01 00 17 00
+ 39 01 00 00 64 00 07 F3 00 50 90 C9 00 01
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 05 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 64 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-reset-sequence = <1 200>, <0 200>, <1 200>;
+ qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-dsi-post-init-delay = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index 75aea7280e6c..4b37032e1775 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -833,6 +833,8 @@
"RX_BIAS", "INT_MCLK0",
"SPK_RX_BIAS", "INT_MCLK0",
"INT_LDO_H", "INT_MCLK0",
+ "RX_I2S_CLK", "INT_MCLK0",
+ "TX_I2S_CLK", "INT_MCLK0",
"MIC BIAS External", "Handset Mic",
"MIC BIAS External2", "Headset Mic",
"MIC BIAS External", "Secondary Mic",
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 97dc5ed014b2..eb1f821234ba 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -815,7 +815,8 @@
mmagic-supply = <&gdsc_mmagic_camss>;
gdscr-supply = <&gdsc_camss_top>;
vfe0-vdd-supply = <&gdsc_vfe0>;
- qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd";
+ vfe1-vdd-supply = <&gdsc_vfe1>;
+ qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd", "vfe1-vdd";
clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>,
<&clock_mmss clk_camss_top_ahb_clk>,
<&clock_mmss clk_cci_clk_src>,
@@ -825,12 +826,16 @@
<&clock_mmss clk_mmagic_camss_axi_clk>,
<&clock_mmss clk_camss_vfe_ahb_clk>,
<&clock_mmss clk_camss_vfe0_ahb_clk>,
+ <&clock_mmss clk_camss_vfe1_ahb_clk>,
<&clock_mmss clk_camss_vfe_axi_clk>,
<&clock_mmss clk_camss_vfe0_stream_clk>,
+ <&clock_mmss clk_camss_vfe1_stream_clk>,
<&clock_mmss clk_smmu_vfe_axi_clk>,
<&clock_mmss clk_smmu_vfe_ahb_clk>,
<&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_camss_csi_vfe1_clk>,
<&clock_mmss clk_vfe0_clk_src>,
+ <&clock_mmss clk_vfe1_clk_src>,
<&clock_mmss clk_camss_csi_vfe0_clk>,
<&clock_mmss clk_camss_csi2_ahb_clk>,
<&clock_mmss clk_camss_csi2_clk>,
@@ -839,7 +844,8 @@
<&clock_mmss clk_camss_csi2phytimer_clk>,
<&clock_mmss clk_camss_csi2rdi_clk>,
<&clock_mmss clk_camss_ispif_ahb_clk>,
- <&clock_mmss clk_camss_vfe0_clk>;
+ <&clock_mmss clk_camss_vfe0_clk>,
+ <&clock_mmss clk_camss_vfe1_clk>;
clock-names =
"mmss_mmagic_ahb_clk",
"camss_top_ahb_clk",
@@ -850,12 +856,16 @@
"mmagic_camss_axi_clk",
"camss_vfe_ahb_clk",
"camss_vfe0_ahb_clk",
+ "camss_vfe1_ahb_clk",
"camss_vfe_axi_clk",
"camss_vfe0_stream_clk",
+ "camss_vfe1_stream_clk",
"smmu_vfe_axi_clk",
"smmu_vfe_ahb_clk",
"camss_csi_vfe0_clk",
+ "camss_csi_vfe1_clk",
"vfe0_clk_src",
+ "vfe1_clk_src",
"camss_csi_vfe0_clk",
"camss_csi2_ahb_clk",
"camss_csi2_clk",
@@ -864,7 +874,8 @@
"camss_csi2phytimer_clk",
"camss_csi2rdi_clk",
"camss_ispif_ahb_clk",
- "clk_camss_vfe0_clk";
+ "clk_camss_vfe0_clk",
+ "clk_camss_vfe1_clk";
qcom,clock-rates = <19200000
19200000
@@ -875,12 +886,16 @@
0
0
0
+ 0
320000000
0
0
0
0
- 19200000
+ 0
+ 0
+ 320000000
+ 320000000
0
0
200000000
@@ -889,6 +904,7 @@
200000000
200000000
0
+ 100000000
100000000>;
};
@@ -896,6 +912,8 @@
compatible = "qcom,ntn_avb";
ntn-rst-gpio = <&pm8994_gpios 13 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ntn_clk_sync>;
vdd-ntn-hsic-supply = <&pm8994_l25>;
vdd-ntn-pci-supply = <&pm8994_s4>;
@@ -1135,6 +1153,9 @@
compatible = "qcom,adv7481";
reg = <0x70 0xff>;
cam_vdig-supply = <&pm8994_s3>;
+ tx-lanes = <4 2 1>;
+ settle-count = <16 16 16>;
+ res-array = "RES_1080P", "RES_720P", "RES_576P_480P";
/* Cameras powered by PMIC: */
cam_vio-supply = <&pm8994_lvs1>;
cam_vana-supply = <&pm8994_l17>;
@@ -1153,6 +1174,10 @@
<&pm8994_gpios 7 0>; /* INT3 */
};
+ qcom,tv-tuner {
+ compatible = "qcom,tv-tuner";
+ };
+
qcom,msm-ba {
compatible = "qcom,msm-ba";
qcom,ba-input-profile-0 {
@@ -1174,6 +1199,16 @@
qcom,ba-node = <1>; /* ba node */
qcom,user-type = <1>; /* user type */
};
+
+ qcom,ba-input-profile-2 {
+ qcom,type = <8>; /* input type */
+ qcom,name = "TUNER-2"; /* input name */
+ qcom,ba-input = <16>; /* ba input id */
+ qcom,ba-output = <0>; /* ba output id */
+ qcom,sd-name = "tv-tuner"; /* sd name */
+ qcom,ba-node = <2>; /* ba node */
+ qcom,user-type = <1>; /* user type */
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index 959867160705..8eee428355db 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,7 +11,7 @@
*/
#include "msm8996-pinctrl.dtsi"
-#include "msm8996-camera-sensor-adp.dtsi"
+#include "msm8996-camera-sensor-auto-cdp.dtsi"
#include "msm8996-wsa881x.dtsi"
/ {
@@ -580,7 +580,8 @@
mmagic-supply = <&gdsc_mmagic_camss>;
gdscr-supply = <&gdsc_camss_top>;
vfe0-vdd-supply = <&gdsc_vfe0>;
- qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd";
+ vfe1-vdd-supply = <&gdsc_vfe1>;
+ qcom,cam-vreg-name = "mmagic", "gdscr", "vfe0-vdd", "vfe1-vdd";
clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>,
<&clock_mmss clk_camss_top_ahb_clk>,
<&clock_mmss clk_cci_clk_src>,
@@ -590,12 +591,16 @@
<&clock_mmss clk_mmagic_camss_axi_clk>,
<&clock_mmss clk_camss_vfe_ahb_clk>,
<&clock_mmss clk_camss_vfe0_ahb_clk>,
+ <&clock_mmss clk_camss_vfe1_ahb_clk>,
<&clock_mmss clk_camss_vfe_axi_clk>,
<&clock_mmss clk_camss_vfe0_stream_clk>,
+ <&clock_mmss clk_camss_vfe1_stream_clk>,
<&clock_mmss clk_smmu_vfe_axi_clk>,
<&clock_mmss clk_smmu_vfe_ahb_clk>,
<&clock_mmss clk_camss_csi_vfe0_clk>,
+ <&clock_mmss clk_camss_csi_vfe1_clk>,
<&clock_mmss clk_vfe0_clk_src>,
+ <&clock_mmss clk_vfe1_clk_src>,
<&clock_mmss clk_camss_csi_vfe0_clk>,
<&clock_mmss clk_camss_csi2_ahb_clk>,
<&clock_mmss clk_camss_csi2_clk>,
@@ -604,7 +609,8 @@
<&clock_mmss clk_camss_csi2phytimer_clk>,
<&clock_mmss clk_camss_csi2rdi_clk>,
<&clock_mmss clk_camss_ispif_ahb_clk>,
- <&clock_mmss clk_camss_vfe0_clk>;
+ <&clock_mmss clk_camss_vfe0_clk>,
+ <&clock_mmss clk_camss_vfe1_clk>;
clock-names =
"mmss_mmagic_ahb_clk",
"camss_top_ahb_clk",
@@ -615,12 +621,16 @@
"mmagic_camss_axi_clk",
"camss_vfe_ahb_clk",
"camss_vfe0_ahb_clk",
+ "camss_vfe1_ahb_clk",
"camss_vfe_axi_clk",
"camss_vfe0_stream_clk",
+ "camss_vfe1_stream_clk",
"smmu_vfe_axi_clk",
"smmu_vfe_ahb_clk",
"camss_csi_vfe0_clk",
+ "camss_csi_vfe1_clk",
"vfe0_clk_src",
+ "vfe1_clk_src",
"camss_csi_vfe0_clk",
"camss_csi2_ahb_clk",
"camss_csi2_clk",
@@ -629,7 +639,8 @@
"camss_csi2phytimer_clk",
"camss_csi2rdi_clk",
"camss_ispif_ahb_clk",
- "clk_camss_vfe0_clk";
+ "clk_camss_vfe0_clk",
+ "clk_camss_vfe1_clk";
qcom,clock-rates = <19200000
19200000
@@ -640,12 +651,16 @@
0
0
0
+ 0
320000000
0
0
0
0
- 19200000
+ 0
+ 0
+ 320000000
+ 320000000
0
0
200000000
@@ -654,6 +669,7 @@
200000000
200000000
0
+ 100000000
100000000>;
};
@@ -661,6 +677,8 @@
compatible = "qcom,ntn_avb";
ntn-rst-gpio = <&pm8994_gpios 13 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ntn_clk_sync>;
vdd-ntn-hsic-supply = <&pm8994_l25>;
vdd-ntn-pci-supply = <&pm8994_s4>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-camera-sensor-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-camera-sensor-auto-cdp.dtsi
new file mode 100644
index 000000000000..2843700ac168
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8996-camera-sensor-auto-cdp.dtsi
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996-regulator-camera-auto-cdp.dtsi"
+
+&soc {
+ led_flash0: qcom,camera-flash {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
+ qcom,torch-source = <&pmi8994_torch0 &pmi8994_torch1>;
+ qcom,switch-source = <&pmi8994_switch>;
+ };
+};
+
+&cci {
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&pm8994_l23>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ actuator1: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ cam_vaf-supply = <&pm8994_l23>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ ois0: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&pm8994_l23>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ qcom,cci-master = <0>;
+ cam_vdig-supply = <&pm8994_s3>;
+ cam_vio-supply = <&pm8994_lvs1>;
+ cam_vana-supply = <&pm8994_l17>;
+ cam_vaf-supply = <&pm8994_l23>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_vaf";
+ qcom,cam-vreg-min-voltage = <1300000 0 2500000 2800000>;
+ qcom,cam-vreg-max-voltage = <1300000 0 2500000 2800000>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>,
+ <&tlmm 29 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-standby = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_STANDBY0";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <19200000 0>;
+ };
+
+ eeprom1: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ qcom,cci-master = <1>;
+ cam_vdig-supply = <&pm8994_l27>;
+ cam_vio-supply = <&pm8994_lvs1>;
+ cam_vana-supply = <&pm8994_l29>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <1000000 0 2800000>;
+ qcom,cam-vreg-max-voltage = <1000000 0 2800000>;
+ qcom,cam-vreg-op-mode = <105000 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 23 0>,
+ <&tlmm 26 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-standby = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_STANDBY2";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <19200000 0>;
+ };
+
+ qcom,camera@2 {
+ cell-index = <2>;
+ compatible = "qcom,camera";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <90>;
+ qcom,eeprom-src = <&eeprom1>;
+ qcom,actuator-src = <&actuator1>;
+ cam_vdig-supply = <&pm8994_l27>;
+ cam_vio-supply = <&pm8994_lvs1>;
+ cam_vana-supply = <&pm8994_l26>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <800000 0 1100000>;
+ qcom,cam-vreg-max-voltage = <800000 0 1100000>;
+ qcom,cam-vreg-op-mode = <105000 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 23 0>,
+ <&tlmm 26 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-standby = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_STANDBY2";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi
index e96ed3020679..40318c52c7c5 100644
--- a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi
@@ -73,6 +73,7 @@
qcom,msm-bus,vectors-KBps =
<100 512 0 0>,
<100 512 207108 14432000>;
+ qcom,ntn-pcierst-resx;
};
usb_detect: usb_detect {
@@ -233,7 +234,7 @@
interrupts = <78 0>;
reset-gpio = <&tlmm 71 GPIO_ACTIVE_LOW>;
bits-per-word = <8>;
- reset-delay-msec = <100>;
+ reset-delay-msec = <300>;
pinctrl-names = "active", "sleep";
pinctrl-0 = <&can_rst_on>;
pinctrl-1 = <&can_rst_off>;
@@ -335,9 +336,16 @@
/delete-property/ vin-supply;
};
+&pcie0 {
+ /delete-property/ qcom,l1-supported;
+ /delete-property/ qcom,l1ss-supported;
+};
+
&pcie1 {
qcom,msi-gicm-addr = <0x09bd0040>;
qcom,msi-gicm-base = <0x240>;
+ /delete-property/ qcom,l1-supported;
+ /delete-property/ qcom,l1ss-supported;
};
&pcie2 {
@@ -367,6 +375,8 @@
qcom,mdm-statusline-not-a-powersource;
qcom,mdm-userspace-handle-shutdown;
qcom,pil-force-shutdown;
+ qcom,shutdown-timeout-ms = <30000>;
+ qcom,reset-time-ms = <16203>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
index 0bd9b02f3d2e..24f593aa5e9f 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
@@ -726,6 +726,7 @@
sound-9335 {
qcom,model = "msm8996-tasha-mtp-snd-card";
+ qcom,tdm-audio-intf;
qcom,audio-routing =
"AIF4 VI", "MCLK",
@@ -760,6 +761,66 @@
asoc-codec = <&stub_codec>, <&hdmi_audio>;
asoc-codec-names = "msm-stub-codec.1", "msm-hdmi-audio-codec-rx";
+ asoc-cpu = <&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+ <&dai_hdmi>, <&dai_mi2s>,
+ <&sb_0_rx>, <&sb_0_tx>,
+ <&sb_1_rx>, <&sb_1_tx>,
+ <&sb_2_rx>, <&sb_2_tx>,
+ <&sb_3_rx>, <&sb_3_tx>,
+ <&sb_4_rx>, <&sb_4_tx>,
+ <&sb_5_tx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>,
+ <&afe_proxy_rx>, <&afe_proxy_tx>,
+ <&incall_record_rx>, <&incall_record_tx>,
+ <&incall_music_rx>, <&incall_music2_rx>,
+ <&sb_5_rx>, <&sb_6_rx>,
+ <&usb_audio_rx>, <&usb_audio_tx>,
+ <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+ <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+ <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
+ <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
+ <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
+ <&dai_tert_tdm_rx_2>, <&dai_tert_tdm_rx_3>,
+ <&dai_tert_tdm_tx_0>, <&dai_tert_tdm_tx_1>,
+ <&dai_tert_tdm_tx_2>, <&dai_tert_tdm_tx_3>,
+ <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_rx_1>,
+ <&dai_quat_tdm_rx_2>, <&dai_quat_tdm_rx_3>,
+ <&dai_quat_tdm_tx_0>, <&dai_quat_tdm_tx_1>,
+ <&dai_quat_tdm_tx_2>, <&dai_quat_tdm_tx_3>;
+ asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+ "msm-dai-q6-hdmi.8", "msm-dai-q6-mi2s.2",
+ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+ "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+ "msm-dai-q6-dev.16395",
+ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-dev.16394", "msm-dai-q6-dev.16396",
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+ "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+ "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+ "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
+ "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
+ "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
+ "msm-dai-q6-tdm.36900", "msm-dai-q6-tdm.36902",
+ "msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36899",
+ "msm-dai-q6-tdm.36901", "msm-dai-q6-tdm.36903",
+ "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36914",
+ "msm-dai-q6-tdm.36916", "msm-dai-q6-tdm.36918",
+ "msm-dai-q6-tdm.36913", "msm-dai-q6-tdm.36915",
+ "msm-dai-q6-tdm.36917", "msm-dai-q6-tdm.36919";
qcom,hph-en1-gpio = <&pmi8994_gpios 10 0>;
qcom,hph-en0-gpio = <&pm8994_gpios 13 0>;
qcom,us-euro-gpios = <&pm8994_mpps 2 0>;
@@ -769,6 +830,35 @@
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
"SpkrLeft", "SpkrRight";
};
+ qcom,msm-dai-tdm-tert-rx {
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&tert_tdm_dout_active>;
+ pinctrl-1 = <&tert_tdm_dout_sleep>;
+ };
+ qcom,msm-dai-tdm-tert-tx {
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ };
+ qcom,msm-dai-tdm-quat-rx {
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&quat_tdm_active &quat_tdm_dout_active>;
+ pinctrl-1 = <&quat_tdm_sleep &quat_tdm_dout_sleep>;
+ };
+ qcom,msm-dai-tdm-quat-tx {
+ qcom,msm-cpudai-tdm-clk-internal = <1>;
+ qcom,msm-cpudai-tdm-sync-mode = <0>;
+ qcom,msm-cpudai-tdm-sync-src = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&quat_tdm_din_active>;
+ pinctrl-1 = <&quat_tdm_din_sleep>;
+ };
};
&pm8994_gpios {
diff --git a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
index 244901bd5cef..50312a57059a 100644
--- a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
@@ -2773,5 +2773,19 @@
};
};
};
+
+ ntn_clk_sync: ntn_clk_sync {
+ mux {
+ pins = "gpio69";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio69";
+ drive-strength = <8>; /* 8 mA */
+ output-low;
+ bias-pull-down; /* PULL down */
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-regulator-camera-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-regulator-camera-auto-cdp.dtsi
new file mode 100644
index 000000000000..bb4d1d33a178
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8996-regulator-camera-auto-cdp.dtsi
@@ -0,0 +1,24 @@
+/* Copyright (c) 2014-2016, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+ /* PM8994 LDO26 = VDD_SS_CX supply */
+ rpm-regulator-ldoa26 {
+ pm8994_l26: regulator-l26 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,init-voltage = <1100000>;
+ status = "okay";
+ };
+ };
+};
+
diff --git a/arch/arm/boot/dts/qcom/msm8996-v3-auto-cdp.dts b/arch/arm/boot/dts/qcom/msm8996-v3-auto-cdp.dts
index 8ca2b30b3779..9e630aa77717 100644
--- a/arch/arm/boot/dts/qcom/msm8996-v3-auto-cdp.dts
+++ b/arch/arm/boot/dts/qcom/msm8996-v3-auto-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -43,7 +43,7 @@
compatible = "renesas,rh850";
reg = <0>;
interrupt-parent = <&tlmm>;
- interrupts = <127 0>;
+ interrupts = <122 0>;
spi-max-frequency = <5000000>;
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index ed72e18d90bc..3b067e6090e6 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -3100,7 +3100,8 @@
<&afe_pcm_tx>, <&afe_proxy_rx>, <&afe_proxy_tx>,
<&incall_record_rx>, <&incall_record_tx>,
<&incall_music_rx>, <&incall_music2_rx>,
- <&sb_5_rx>, <&sb_6_rx>;
+ <&sb_5_rx>, <&sb_6_rx>,
+ <&usb_audio_rx>, <&usb_audio_tx>;
asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
"msm-dai-q6-hdmi.8", "msm-dai-q6-mi2s.2",
"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
@@ -3113,7 +3114,8 @@
"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
- "msm-dai-q6-dev.16396";
+ "msm-dai-q6-dev.16396",
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673";
asoc-codec = <&stub_codec>;
asoc-codec-names = "msm-stub-codec.1";
};
@@ -3329,6 +3331,15 @@
qcom,msm-dai-q6-dev-id = <16396>;
};
+ usb_audio_rx: qcom,msm-dai-q6-usb-audio-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <28672>;
+ };
+
+ usb_audio_tx: qcom,msm-dai-q6-usb-audio-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <28673>;
+ };
};
dai_pri_auxpcm: qcom,msm-pri-auxpcm {
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-cdp.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-cdp.dts
index e104be7c2742..2983513d0310 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto-cdp.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,7 +30,7 @@
compatible = "renesas,rh850";
reg = <0>;
interrupt-parent = <&tlmm>;
- interrupts = <127 0>;
+ interrupts = <122 0>;
spi-max-frequency = <5000000>;
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
index f0fade10633e..262f8397d975 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
@@ -12,6 +12,20 @@
#include "msm8996v3-auto.dtsi"
+/ {
+ firmware: firmware {
+ android {
+ fstab {
+ /delete-node/ system;
+ vendor {
+ fsmgr_flags = "wait,slotselect,verify";
+ status = "ok";
+ };
+ };
+ };
+ };
+};
+
&gfx_vreg {
qcom,cpr-fuse-combos = <24>;
qcom,cpr-speed-bins = <3>;
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-v1.1-auto-cdp.dts b/arch/arm/boot/dts/qcom/msm8996pro-v1.1-auto-cdp.dts
index 06d040aa6bcb..d798d9e163f2 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-v1.1-auto-cdp.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-v1.1-auto-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,7 +30,7 @@
compatible = "renesas,rh850";
reg = <0>;
interrupt-parent = <&tlmm>;
- interrupts = <127 0>;
+ interrupts = <122 0>;
spi-max-frequency = <5000000>;
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
index 4d05ea75b576..7db93928a369 100644
--- a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -184,6 +184,13 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
};
+&dsi_lgd_incell_sw49106_fhd_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
&mdss_dp_ctrl {
pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
index 3ffd43bcda60..2cf4a1378778 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
#include "dsi-panel-truly-1080p-cmd.dtsi"
#include "dsi-panel-truly-1080p-video.dtsi"
#include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi"
+#include "dsi-panel-lgd-incell-sw49106-fhd-video.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -325,3 +326,14 @@
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2f>;
};
+
+
+&dsi_lgd_incell_sw49106_fhd_video {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1b 08 09 05 03 04 a0];
+ qcom,mdss-dsi-t-clk-post = <0x0d>;
+ qcom,mdss-dsi-t-clk-pre = <0x30>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
index 50f5d83346c6..8b1596325889 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -184,6 +184,13 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
};
+&dsi_lgd_incell_sw49106_fhd_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
&sdhc_1 {
/* device core power supply */
vdd-supply = <&pm660l_l4>;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi
index 045cc44b2d4c..8f7773a87816 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi
@@ -27,11 +27,5 @@
memory-region = <&qseecom_mem>;
qcom,ion-heap-type = "DMA";
};
-
- qcom,ion-heap@28 { /* Audio Heap */
- reg = <28>;
- memory-region = <&ion_audio>;
- qcom,ion-heap-type = "CARVEOUT";
- };
};
};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi
index a672b04cbb35..eb78cfd8b133 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-blsp.dtsi
@@ -36,8 +36,8 @@
pinctrl-0 = <&spi_9_active>;
pinctrl-1 = <&spi_9_sleep>;
clock-names = "iface_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
- <&clock_gcc clk_gcc_blsp2_qup3_spi_apps_clk>;
+ clocks = <&clock_virt clk_gcc_blsp2_ahb_clk>,
+ <&clock_virt clk_gcc_blsp2_qup3_spi_apps_clk>;
status = "disabled";
};
@@ -54,8 +54,8 @@
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
- <&clock_gcc clk_gcc_blsp1_qup6_i2c_apps_clk>;
+ clocks = <&clock_virt clk_gcc_blsp1_ahb_clk>,
+ <&clock_virt clk_gcc_blsp1_qup6_i2c_apps_clk>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_6_active>;
pinctrl-1 = <&i2c_6_sleep>;
@@ -75,8 +75,8 @@
qcom,clk-freq-out = <400000>;
qcom,clk-freq-in = <19200000>;
clock-names = "iface_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
- <&clock_gcc clk_gcc_blsp2_qup2_i2c_apps_clk>;
+ clocks = <&clock_virt clk_gcc_blsp2_ahb_clk>,
+ <&clock_virt clk_gcc_blsp2_qup2_i2c_apps_clk>;
pinctrl-names = "i2c_active", "i2c_sleep";
pinctrl-0 = <&i2c_8_active>;
pinctrl-1 = <&i2c_8_sleep>;
@@ -99,8 +99,8 @@
qcom,bam-rx-ep-pipe-index = <3>;
qcom,master-id = <86>;
clock-names = "core_clk", "iface_clk";
- clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
- <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+ clocks = <&clock_virt clk_gcc_blsp1_uart2_apps_clk>,
+ <&clock_virt clk_gcc_blsp1_ahb_clk>;
pinctrl-names = "sleep", "default";
pinctrl-0 = <&blsp1_uart2_sleep>;
pinctrl-1 = <&blsp1_uart2_active>;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi.dts
index 7543950c9cef..54b6d0e4f132 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi.dts
@@ -30,14 +30,18 @@
};
};
-&spi_9 {
+&blsp1_uart2 {
status = "okay";
};
-&i2c_8 {
- status = "okay";
-};
+&reserved_memory {
+ pmem_shared: pmem_shared_region@d0000000 {
+ reg = <0 0xd0000000 0 0x20000000>;
+ label = "pmem_shared_mem";
+ };
-&blsp1_uart2 {
- status = "okay";
+ ion_system: ion_system_region@100000000 {
+ reg = <0x1 0x0 0 0x10000000>;
+ label = "ion_system_mem";
+ };
};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts
index 9057fb315c65..187648f50f59 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,11 +34,13 @@
no-map;
reg = <0 0x88800000 0 0x6200000>;
};
+
peripheral_mem: peripheral_region@8ea00000 {
compatible = "removed-dma-pool";
no-map;
reg = <0 0x8ea00000 0 0x2b00000>;
};
+
adsp_mem: adsp_region {
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -46,6 +48,11 @@
alignment = <0 0x100000>;
size = <0 0x400000>;
};
+
+ ion_system: ion_system_region@ff000000 {
+ reg = <0 0xff000000 0 0x00400000>;
+ label = "ion_system_mem";
+ };
};
&soc {
@@ -106,6 +113,24 @@
/* Up to 800 Mbps */
<45 512 207108 14432000>;
};
+
+ dsrc_vreg: dsrc_vreg {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "dsrc_vreg";
+ startup-delay-us = <2410>;
+ enable-active-high;
+ gpio = <&tlmm 125 0>;
+ };
+
+ qcom,cnss_sdio {
+ compatible = "qcom,cnss_sdio";
+ subsys-name = "AR6320_SDIO";
+ vdd-wlan-supply = <&rome_vreg>;
+ vdd-wlan-xtal-supply = <&pm8994_l30>;
+ vdd-wlan-io-supply = <&pm8994_s4>;
+ vdd-wlan-dsrc-supply = <&dsrc_vreg>;
+ qcom,skip-wlan-en-toggle;
+ };
};
&spi_9 {
@@ -120,10 +145,6 @@
status = "okay";
};
-&blsp1_uart2 {
- status = "okay";
-};
-
&sdhc_2 {
vdd-supply = <&pm8994_l21>;
qcom,vdd-voltage-level = <2950000 2950000>;
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi
index 7815399f23b1..24ac570b0374 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi
@@ -35,14 +35,6 @@
#size-cells = <2>;
ranges;
- pmem_shared: pmem_shared_region {
- reg = <0 0xd0000000 0 0x20000000>;
- label = "pmem_shared_mem";
- };
- ion_system: ion_system_region {
- reg = <0x1 0x0 0 0x10000000>;
- label = "ion_system_mem";
- };
qseecom_mem: qseecom_region {
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -50,10 +42,6 @@
alignment = <0 0x400000>;
size = <0 0x1400000>;
};
- ion_audio: ion_audio_region {
- reg = <0 0xc8000000 0 0x00400000>;
- label = "ion_audio_mem";
- };
};
};
@@ -85,6 +73,11 @@
compatible = "qcom,msm-imem-boot_stats";
reg = <0x6b0 32>;
};
+
+ mem_dump_table@10 {
+ compatible = "qcom,msm-imem-mem_dump_table";
+ reg = <0x10 8>;
+ };
};
sdhc_2: sdhci@74a4900 {
@@ -96,8 +89,8 @@
interrupt-names = "hc_irq", "pwr_irq";
clock-names = "iface_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_sdcc2_ahb_clk>,
- <&clock_gcc clk_gcc_sdcc2_apps_clk>;
+ clocks = <&clock_virt clk_gcc_sdcc2_ahb_clk>,
+ <&clock_virt clk_gcc_sdcc2_apps_clk>;
qcom,large-address-bus;
qcom,bus-width = <4>;
@@ -686,12 +679,21 @@
compatible = "qcom,msm-pcm-hostless";
};
+ sde_kms_hyp: qcom,sde_kms_hyp@900000 {
+ compatible = "qcom,sde-kms-hyp";
+ };
+
qcom,sps {
compatible = "qcom,msm_sps_4k";
qcom,device-type = <3>;
qcom,pipe-attr-ee;
};
+ clock_virt: qcom,virtclk-frontend@0 {
+ compatible = "qcom,virtclk-frontend-8996";
+ #clock-cells = <1>;
+ };
+
clock_gcc: qcom,gcc@300000 {
compatible = "qcom,dummycc";
#clock-cells = <1>;
@@ -872,4 +874,15 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2950000>;
};
+
+ wdog: qcom,wdt@9830000 {
+ compatible = "qcom,msm-watchdog";
+ reg = <0x9830000 0x1000>;
+ reg-names = "wdt-base";
+ interrupts = <0 28 0>, <0 29 0>;
+ qcom,bark-time = <11000>;
+ qcom,pet-time = <10000>;
+ qcom,ipi-ping;
+ qcom,wakeup-enable;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi
index 65eaa0c5aef9..2076ba08280e 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi
@@ -39,7 +39,7 @@
};
&cpp_fd_smmu {
- status = "ok";
+ status = "disabled";
qcom,register-save;
qcom,skip-init;
qcom,fatal-asf;
@@ -71,5 +71,6 @@
* anyways, so using a dummy value is ok.
*/
iommus = <&cpp_fd_smmu 42>;
+ status = "disabled";
};
};
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c5e1943e5427..09ebd37e01e0 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -221,6 +221,7 @@ CONFIG_SERIO=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig
index 32686982997d..c03ab12f1f03 100644
--- a/arch/arm/configs/sdm660-perf_defconfig
+++ b/arch/arm/configs/sdm660-perf_defconfig
@@ -104,7 +104,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 6d685298690e..648d5fac9cbf 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -357,7 +357,7 @@ static struct crypto_alg aesbs_algs[] = { {
}, {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-neonbs",
- .cra_priority = 300,
+ .cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
@@ -377,7 +377,7 @@ static struct crypto_alg aesbs_algs[] = { {
}, {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-neonbs",
- .cra_priority = 300,
+ .cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_helper_ctx),
@@ -397,7 +397,7 @@ static struct crypto_alg aesbs_algs[] = { {
}, {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-neonbs",
- .cra_priority = 300,
+ .cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 5c06733871fc..72e6efb8e910 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -37,4 +37,3 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
-generic-y += unaligned.h
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f13ae153fb24..d2315ffd8f12 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,8 +112,12 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the base location for PIE (ET_DYN with INTERP) loads. */
-#define ELF_ET_DYN_BASE 0x400000UL
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
new file mode 100644
index 000000000000..ab905ffcf193
--- /dev/null
+++ b/arch/arm/include/asm/unaligned.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianess
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cdefc69c656b..75a371951f1a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -133,30 +133,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
set_fs(fs);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * Note that we now dump the code first, just in case the backtrace
+ * kills us.
*/
- fs = get_fs();
- set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
- bad = __get_user(val, &((u16 *)addr)[i]);
+ bad = get_user(val, &((u16 *)addr)[i]);
else
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -167,8 +163,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ mm_segment_t fs;
+
+ if (!user_mode(regs)) {
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index 7b02ed218a42..0c120b2ea2f9 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -31,7 +31,6 @@
#include "soc.h"
#define OMAP1_DMA_BASE (0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT 17
static u32 enable_1510_mode;
@@ -311,8 +310,6 @@ static int __init omap1_system_dma_init(void)
goto exit_iounmap;
}
- d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
-
/* Valid attributes for omap1 plus processors */
if (cpu_is_omap15xx())
d->dev_caps = ENABLE_1510_MODE;
@@ -329,13 +326,14 @@ static int __init omap1_system_dma_init(void)
d->dev_caps |= CLEAR_CSR_ON_READ;
d->dev_caps |= IS_WORD_16;
- if (cpu_is_omap15xx())
- d->chan_count = 9;
- else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
- if (!(d->dev_caps & ENABLE_1510_MODE))
- d->chan_count = 16;
+ /* available logical channels */
+ if (cpu_is_omap15xx()) {
+ d->lch_count = 9;
+ } else {
+ if (d->dev_caps & ENABLE_1510_MODE)
+ d->lch_count = 9;
else
- d->chan_count = 9;
+ d->lch_count = 16;
}
p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 58144779dec4..1e6e09841707 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -522,7 +522,6 @@ static void pdata_quirks_check(struct pdata_init *quirks)
if (of_machine_is_compatible(quirks->compatible)) {
if (quirks->fn)
quirks->fn();
- break;
}
quirks++;
}
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index a727282bfa99..761d7d64d643 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/leds.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/fb.h>
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index db20d25daaab..1b92a4112bd1 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -17,6 +17,7 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/leds.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 89f790dda93e..d1f12909f740 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -18,6 +18,7 @@
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
+#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/pm.h>
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 066e3a250ee0..5e50c53f1f4b 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/leds.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index 54122a983ae3..2cce92924068 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -15,6 +15,7 @@
#include <linux/irq.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
+#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/mtd/mtd.h>
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 30e62a3f0701..d757cfb5f8a6 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -13,6 +13,7 @@
#include <linux/cpufreq.h>
#include <linux/interrupt.h>
+#include <linux/leds.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index e20359a7433c..d7f0a7d87ef2 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/leds.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 9fe8e241335c..e1f6f0daa847 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = {
.val = PMD_SECT_USER,
.set = "USR",
}, {
- .mask = L_PMD_SECT_RDONLY,
- .val = L_PMD_SECT_RDONLY,
+ .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.set = "ro",
.clear = "RW",
#elif __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index d3d718772381..4d58a6eca48e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -669,8 +669,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
- .mask = ~L_PMD_SECT_RDONLY,
- .prot = L_PMD_SECT_RDONLY,
+ .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+ .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3cb501b93da6..4c72ce5955d9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -104,6 +104,7 @@ config ARM64
select HAVE_CONTEXT_TRACKING
select HAVE_ARM_SMCCC
select THREAD_INFO_IN_TASK
+ select HAVE_ARM_SMCCC
help
ARM 64-bit (AArch64) Linux support.
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 3c92d92278e5..a14a6bb31887 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -30,6 +30,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/memreserve/ 0x81000000 0x00200000;
+
#include <dt-bindings/interrupt-controller/arm-gic.h>
/memreserve/ 0x84b00000 0x00000008;
diff --git a/arch/arm64/configs/fsmcortex-perf_defconfig b/arch/arm64/configs/fsmcortex-perf_defconfig
index 31b8aa557d5e..cb16a3509b03 100644
--- a/arch/arm64/configs/fsmcortex-perf_defconfig
+++ b/arch/arm64/configs/fsmcortex-perf_defconfig
@@ -1,5 +1,6 @@
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
# CONFIG_USELIB is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
@@ -115,6 +116,7 @@ CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
@@ -228,16 +230,9 @@ CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_SOCKEV_NLMCAST=y
-CONFIG_BT=y
-CONFIG_MSM_BT_POWER=y
-CONFIG_BTFM_SLIM=y
-CONFIG_BTFM_SLIM_WCN3990=y
-CONFIG_CFG80211=y
-CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
-CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -293,27 +288,17 @@ CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
-CONFIG_WCNSS_MEM_PRE_ALLOC=y
-CONFIG_ATH_CARDS=y
-CONFIG_WIL6210=m
-CONFIG_CLD_LL_CORE=y
-CONFIG_CNSS_GENL=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
-CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
-CONFIG_SECURE_TOUCH=y
-CONFIG_TOUCHSCREEN_ST=y
-CONFIG_TOUCHSCREEN_ST_I2C=y
CONFIG_INPUT_MISC=y
-CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
-CONFIG_INPUT_STMVL53L0=y
# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
@@ -328,10 +313,12 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y
# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
+CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS=y
CONFIG_SLIMBUS_MSM_NGD=y
-CONFIG_SOUNDWIRE=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
@@ -366,8 +353,6 @@ CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_I2C_PMIC=y
-CONFIG_WCD9335_CODEC=y
-CONFIG_WCD934X_CODEC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_RPM_SMD=y
@@ -381,68 +366,7 @@ CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_STUB=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_VIDEO_ADV_DEBUG=y
-CONFIG_VIDEO_FIXED_MINOR_RANGES=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
-CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
-CONFIG_MSM_VIDC_GOVERNORS=y
-CONFIG_MSM_SDE_ROTATOR=y
-CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
-CONFIG_DVB_MPQ=m
-CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_TSPP=m
-CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_DP_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_USB_AUDIO_QMI=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_MSM8998=y
+# CONFIG_VGA_ARB is not set
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
@@ -490,6 +414,8 @@ CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_WLED=y
@@ -511,6 +437,9 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_ONESHOT_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -527,7 +456,7 @@ CONFIG_MSM_MHI=y
CONFIG_MSM_MHI_UCI=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
-CONFIG_MSM_MDSS_PLL=y
+CONFIG_MSM_EXT_DISPLAY=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MSM_TIMER_LEAP=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
@@ -561,10 +490,8 @@ CONFIG_QCOM_SCM=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
-CONFIG_ICNSS=y
CONFIG_MSM_RUN_QUEUE_STATS=y
CONFIG_MSM_BOOT_STATS=y
-CONFIG_MSM_ADSP_LOADER=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
@@ -574,14 +501,13 @@ CONFIG_TRACER_PKT=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_AVTIMER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
-CONFIG_MSM_QBT1000=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_WCD_DSP_GLINK=y
CONFIG_QCOM_SMCINVOKE=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
diff --git a/arch/arm64/configs/fsmcortex_defconfig b/arch/arm64/configs/fsmcortex_defconfig
index a0ad65761393..517990921ad0 100644
--- a/arch/arm64/configs/fsmcortex_defconfig
+++ b/arch/arm64/configs/fsmcortex_defconfig
@@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
@@ -114,6 +115,7 @@ CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
@@ -229,17 +231,9 @@ CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_SOCKEV_NLMCAST=y
-CONFIG_BT=y
-CONFIG_MSM_BT_POWER=y
-CONFIG_BTFM_SLIM=y
-CONFIG_BTFM_SLIM_WCN3990=y
-CONFIG_CFG80211=y
-CONFIG_CFG80211_INTERNAL_REGDB=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
-CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -294,30 +288,19 @@ CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
-CONFIG_WCNSS_MEM_PRE_ALLOC=y
-CONFIG_ATH_CARDS=y
-CONFIG_WIL6210=m
-CONFIG_CLD_LL_CORE=y
-CONFIG_CNSS_GENL=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
-CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
-CONFIG_SECURE_TOUCH=y
-CONFIG_TOUCHSCREEN_ST=y
-CONFIG_TOUCHSCREEN_ST_I2C=y
CONFIG_INPUT_MISC=y
-CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_KEYCHORD=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=y
-CONFIG_INPUT_STMVL53L0=y
# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_MSM=y
@@ -330,10 +313,12 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y
# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
+CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS=y
CONFIG_SLIMBUS_MSM_NGD=y
-CONFIG_SOUNDWIRE=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
@@ -368,8 +353,6 @@ CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_I2C_PMIC=y
-CONFIG_WCD9335_CODEC=y
-CONFIG_WCD934X_CODEC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_RPM_SMD=y
@@ -383,68 +366,7 @@ CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_STUB=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_VIDEO_ADV_DEBUG=y
-CONFIG_VIDEO_FIXED_MINOR_RANGES=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
-CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
-CONFIG_MSM_VIDC_GOVERNORS=y
-CONFIG_MSM_SDE_ROTATOR=y
-CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
-CONFIG_DVB_MPQ=m
-CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_TSPP=m
-CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_DP_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_USB_AUDIO_QMI=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_MSM8998=y
+# CONFIG_VGA_ARB is not set
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
@@ -479,9 +401,7 @@ CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
@@ -498,6 +418,8 @@ CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_WLED=y
@@ -527,6 +449,9 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_ONESHOT_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -544,7 +469,7 @@ CONFIG_MSM_MHI_UCI=y
CONFIG_MSM_MHI_DEBUG=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
-CONFIG_MSM_MDSS_PLL=y
+CONFIG_MSM_EXT_DISPLAY=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MSM_TIMER_LEAP=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
@@ -555,7 +480,6 @@ CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
-CONFIG_QPNP_HAPTIC=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
@@ -582,7 +506,6 @@ CONFIG_QCOM_SCM=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
-CONFIG_ICNSS=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
@@ -590,7 +513,6 @@ CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_MSM_RUN_QUEUE_STATS=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_CPUSS_DUMP=y
-CONFIG_MSM_ADSP_LOADER=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
@@ -600,14 +522,13 @@ CONFIG_TRACER_PKT=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MSM_MPM_OF=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_AVTIMER=y
CONFIG_QCOM_REMOTEQDSS=y
CONFIG_MSM_SERVICE_NOTIFIER=y
-CONFIG_MSM_QBT1000=y
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_WCD_DSP_GLINK=y
CONFIG_QCOM_SMCINVOKE=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
index 70673d4959eb..969fc6b7e859 100644
--- a/arch/arm64/configs/msm-auto-gvm-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
@@ -226,6 +226,7 @@ CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_PARANOID_SD_INIT=y
@@ -272,6 +273,8 @@ CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_BOOT_TIME_MARKER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig
index 455c7581f51f..c5d68e6e2d58 100644
--- a/arch/arm64/configs/msm-auto-gvm_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm_defconfig
@@ -229,6 +229,7 @@ CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_PARANOID_SD_INIT=y
@@ -278,6 +279,8 @@ CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_BOOT_TIME_MARKER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig
index 728bfc43bbee..1f9e8ac9a446 100644
--- a/arch/arm64/configs/msm-auto-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-perf_defconfig
@@ -258,11 +258,16 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_AUTODETECT is not set
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
+CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_DUMMY=y
@@ -381,6 +386,7 @@ CONFIG_MSM_AIS_DEBUG=y
CONFIG_MSM_AIS_CAMERA_SENSOR=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7481=m
+CONFIG_VIDEO_TVTUNER=m
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_MSM_BA_V4L2=y
@@ -611,6 +617,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig
index 4c54fa07aaa2..36833b167c30 100644
--- a/arch/arm64/configs/msm-auto_defconfig
+++ b/arch/arm64/configs/msm-auto_defconfig
@@ -259,11 +259,16 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_AUTODETECT is not set
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
+CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_DUMMY=y
@@ -385,6 +390,7 @@ CONFIG_MSM_AIS_DEBUG=y
CONFIG_MSM_AIS_CAMERA_SENSOR=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7481=m
+CONFIG_VIDEO_TVTUNER=m
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_MSM_BA_V4L2=y
@@ -655,6 +661,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index a6638f3de2c0..72ef08668808 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -34,7 +34,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
-# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 4c986046cd5b..039efa9a16e0 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -33,7 +33,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
-# CONFIG_AIO is not set
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
index dff3cc7ce071..c0e5b015736c 100644
--- a/arch/arm64/configs/sdm660-perf_defconfig
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -107,7 +107,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 932f5a56d1a6..b7406e05056b 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -87,11 +87,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
* Defer the switch to the current thread's TTBR0_EL1
* until uaccess_enable(). Restore the current
* thread's saved ttbr0 corresponding to its active_mm
- * (if different from init_mm).
*/
cpu_set_reserved_ttbr0();
- if (current->active_mm != &init_mm)
- update_saved_ttbr0(current, current->active_mm);
+ update_saved_ttbr0(current, current->active_mm);
}
}
}
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 9d9287277201..dbfb51405d5a 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -170,7 +170,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#ifdef CONFIG_COMPAT
/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
+#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 320dc9c7e4f4..c6aae0b85cef 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -266,7 +266,7 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}
-#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
+#define kvm_virt_to_phys(x) __pa_symbol(x)
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ae11e8fdbfd2..915b2422d9d0 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -188,6 +188,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x))
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
/*
* virt_to_page(k) convert a _valid_ virtual address to struct page *
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f926b95928ee..2e36504f56b6 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -55,7 +55,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = virt_to_phys(empty_zero_page);
+ unsigned long ttbr = __pa_symbol(empty_zero_page);
asm(
" msr ttbr0_el1, %0 // set TTBR0\n"
@@ -129,7 +129,7 @@ static inline void cpu_install_idmap(void)
local_flush_tlb_all();
cpu_set_idmap_tcr_t0sz();
- cpu_switch_mm(idmap_pg_dir, &init_mm);
+ cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}
/*
@@ -144,7 +144,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd)
phys_addr_t pgd_phys = virt_to_phys(pgd);
- replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+ replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
cpu_install_idmap();
replace_phys(pgd_phys);
@@ -165,29 +165,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
-/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm: describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
struct mm_struct *mm)
{
- if (system_uses_ttbr0_pan()) {
- BUG_ON(mm->pgd == swapper_pg_dir);
- task_thread_info(tsk)->ttbr0 =
- virt_to_phys(mm->pgd) | ASID(mm) << 48;
- }
+ u64 ttbr;
+
+ if (!system_uses_ttbr0_pan())
+ return;
+
+ if (mm == &init_mm)
+ ttbr = __pa_symbol(empty_zero_page);
+ else
+ ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
+ task_thread_info(tsk)->ttbr0 = ttbr;
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -196,6 +188,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
}
#endif
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ /*
+ * We don't actually care about the ttbr0 mapping, so point it at the
+ * zero page.
+ */
+ update_saved_ttbr0(tsk, &init_mm);
+}
+
static inline void __switch_mm(struct mm_struct *next)
{
unsigned int cpu = smp_processor_id();
@@ -223,11 +225,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Update the saved TTBR0_EL1 of the scheduled-in task as the previous
* value may have not been initialised yet (activate_mm caller) or the
* ASID has changed since the last run (following the context switch
- * of another thread of the same process). Avoid setting the reserved
- * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+ * of another thread of the same process).
*/
- if (next != &init_mm)
- update_saved_ttbr0(tsk, next);
+ update_saved_ttbr0(tsk, next);
}
#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ecd7dc14330c..6c3848f50fcc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -120,7 +120,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index 4b1e5a7a98da..89c96bd1aab9 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cpu_ops.h>
@@ -102,7 +103,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
* that read this address need to convert this address to the
* Boot-Loader's endianness before jumping.
*/
- writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+ writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f75000996e4c..3beb2b5cad6f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -23,6 +23,7 @@
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
+#include <linux/mm.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 5f72243e5ba7..a3f8f8bbfc92 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -98,7 +98,7 @@ static void __kprobes *patch_map(void *addr, int fixmap)
page = vmalloc_to_page(addr);
else if (!module && (IS_ENABLED(CONFIG_DEBUG_RODATA)
|| IS_ENABLED(CONFIG_KERNEL_TEXT_RDONLY)))
- page = virt_to_page(addr);
+ page = phys_to_page(__pa_symbol(addr));
else
return addr;
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 471fb3cb8c5f..d43ea93dc68d 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -26,8 +26,7 @@
*/
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
- while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
- !IS_ALIGNED((unsigned long)to, 8))) {
+ while (count && !IS_ALIGNED((unsigned long)from, 8)) {
*(u8 *)to = __raw_readb_no_log(from);
from++;
to++;
@@ -55,23 +54,22 @@ EXPORT_SYMBOL(__memcpy_fromio);
*/
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
- while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
- !IS_ALIGNED((unsigned long)from, 8))) {
- __raw_writeb_no_log(*(volatile u8 *)from, to);
+ while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+ __raw_writeb_no_log(*(u8 *)from, to);
from++;
to++;
count--;
}
while (count >= 8) {
- __raw_writeq_no_log(*(volatile u64 *)from, to);
+ __raw_writeq_no_log(*(u64 *)from, to);
from += 8;
to += 8;
count -= 8;
}
while (count) {
- __raw_writeb_no_log(*(volatile u8 *)from, to);
+ __raw_writeb_no_log(*(u8 *)from, to);
from++;
to++;
count--;
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index b9e7c42cd8eb..9006af285c39 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/delay.h>
+#include <linux/mm.h>
#include <linux/psci.h>
#include <uapi/linux/psci.h>
@@ -46,7 +47,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
static int cpu_psci_cpu_boot(unsigned int cpu)
{
- int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu),
+ __pa_symbol(secondary_entry));
if (err)
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b8b40d95ebef..0b93365e8cf0 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/dma-mapping.h>
+#include <linux/mm.h>
#include <asm/acpi.h>
#include <asm/fixmap.h>
@@ -212,10 +213,10 @@ static void __init request_standard_resources(void)
struct memblock_region *region;
struct resource *res;
- kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(__init_begin - 1);
- kernel_data.start = virt_to_phys(_sdata);
- kernel_data.end = virt_to_phys(_end - 1);
+ kernel_code.start = __pa_symbol(_text);
+ kernel_code.end = __pa_symbol(__init_begin - 1);
+ kernel_data.start = __pa_symbol(_sdata);
+ kernel_data.end = __pa_symbol(_end - 1);
for_each_memblock(memory, region) {
res = alloc_bootmem_low(sizeof(*res));
@@ -367,9 +368,9 @@ void __init setup_arch(char **cmdline_p)
* thread.
*/
#ifdef CONFIG_THREAD_INFO_IN_TASK
- init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+ init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#else
- init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+ init_thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif
#endif
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index aef3605a8c47..2ccb883353d9 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
@@ -96,7 +97,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* boot-loader's endianess before jumping. This is mandated by
* the boot protocol.
*/
- writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+ writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
__flush_dcache_area((__force void *)release_addr,
sizeof(*release_addr));
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ea40dc101433..f7834a3e4d64 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -124,7 +124,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
for (i = -4; i < 1; i++) {
unsigned int val, bad;
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 3b8acfae7797..7e9dd94452bb 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -114,6 +114,7 @@ static struct vm_special_mapping vdso_spec[2];
static int __init vdso_init(void)
{
int i;
+ unsigned long pfn;
if (memcmp(&vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
@@ -131,11 +132,14 @@ static int __init vdso_init(void)
return -ENOMEM;
/* Grab the vDSO data page. */
- vdso_pagelist[0] = virt_to_page(vdso_data);
+ vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
/* Grab the vDSO code pages. */
+ pfn = sym_to_pfn(&vdso_start);
+
for (i = 0; i < vdso_pages; i++)
- vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+ vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
/* Populate the special mapping structures */
vdso_spec[0] = (struct vm_special_mapping) {
@@ -214,8 +218,8 @@ void update_vsyscall(struct timekeeper *tk)
if (!use_syscall) {
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
- vdso_data->raw_time_sec = tk->raw_time.tv_sec;
- vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
+ vdso_data->raw_time_sec = tk->raw_sec;
+ vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
/* tkr_raw.xtime_nsec == 0 */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b4671bd7c..c97ce91cf023 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -310,7 +310,7 @@ ENTRY(__kernel_clock_getres)
b.ne 4f
ldr x2, 6f
2:
- cbz w1, 3f
+ cbz x1, 3f
stp xzr, x2, [x1]
3: /* res == NULL. */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 7fd74d55c68e..30f1a600a7c9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -34,6 +34,7 @@
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
+#include <linux/mm.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
@@ -191,8 +192,8 @@ void __init arm64_memblock_init(void)
* linear mapping. Take care not to clip the kernel which may be
* high in memory.
*/
- memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
- ULLONG_MAX);
+ memblock_remove(max_t(u64, memstart_addr + linear_region_size,
+ __pa_symbol(_end)), ULLONG_MAX);
if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
/* ensure that memstart_addr remains sufficiently aligned */
memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
@@ -212,7 +213,7 @@ void __init arm64_memblock_init(void)
*/
bootloader_memory_limit = memblock_end_of_DRAM();
memblock_enforce_memory_limit(memory_limit);
- memblock_add(__pa(_text), (u64)(_end - _text));
+ memblock_add(__pa_symbol(_text), (u64)(_end - _text));
}
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
@@ -236,7 +237,7 @@ void __init arm64_memblock_init(void)
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
- memblock_reserve(__pa(_text), _end - _text);
+ memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
memblock_reserve(initrd_start, initrd_end - initrd_start);
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 757009daa9ed..03588d136f93 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
+#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
@@ -26,6 +27,13 @@
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). All the early functions are called too
+ * early to use lm_alias so __p*d_populate functions must be used to populate
+ * with the physical address from __pa_symbol.
+ */
+
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
unsigned long end)
{
@@ -33,12 +41,13 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
unsigned long next;
if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ __pmd_populate(pmd, __pa_symbol(kasan_zero_pte),
+ PMD_TYPE_TABLE);
pte = pte_offset_kimg(pmd, addr);
do {
next = addr + PAGE_SIZE;
- set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
+ set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
PAGE_KERNEL));
} while (pte++, addr = next, addr != end && pte_none(*pte));
}
@@ -51,7 +60,8 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
unsigned long next;
if (pud_none(*pud))
- pud_populate(&init_mm, pud, kasan_zero_pmd);
+ __pud_populate(pud, __pa_symbol(kasan_zero_pmd),
+ PMD_TYPE_TABLE);
pmd = pmd_offset_kimg(pud, addr);
do {
@@ -68,7 +78,8 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
unsigned long next;
if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, kasan_zero_pud);
+ __pgd_populate(pgd, __pa_symbol(kasan_zero_pud),
+ PUD_TYPE_TABLE);
pud = pud_offset_kimg(pgd, addr);
do {
@@ -148,7 +159,7 @@ void __init kasan_init(void)
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
dsb(ishst);
- cpu_replace_ttbr1(tmp_pg_dir);
+ cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -199,10 +210,10 @@ void __init kasan_init(void)
*/
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_zero_pte[i],
- pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+ pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
memset(kasan_zero_page, 0, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8c063d39bc17..b1411e933bb3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -31,6 +31,7 @@
#include <linux/stop_machine.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
+#include <linux/mm.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -391,8 +392,8 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
- unsigned long kernel_start = __pa(_text);
- unsigned long kernel_end = __pa(__init_begin);
+ unsigned long kernel_start = __pa_symbol(_text);
+ unsigned long kernel_end = __pa_symbol(__init_begin);
/*
* Take care not to create a writable alias for the
@@ -456,14 +457,15 @@ void mark_rodata_ro(void)
unsigned long section_size;
section_size = (unsigned long)_etext - (unsigned long)_text;
- create_mapping_late(__pa(_text), (unsigned long)_text,
+ create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
section_size, PAGE_KERNEL_ROX);
/*
* mark .rodata as read only. Use __init_begin rather than __end_rodata
* to cover NOTES and EXCEPTION_TABLE.
*/
section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
- create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
+ create_mapping_late(__pa_symbol(__start_rodata),
+ (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
}
@@ -480,7 +482,7 @@ void fixup_init(void)
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
pgprot_t prot, struct vm_struct *vma)
{
- phys_addr_t pa_start = __pa(va_start);
+ phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
BUG_ON(!PAGE_ALIGNED(pa_start));
@@ -528,7 +530,7 @@ static void __init map_kernel(pgd_t *pgd)
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
- __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+ __pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
pud_clear_fixmap();
} else {
BUG();
@@ -590,7 +592,7 @@ void __init paging_init(void)
*/
cpu_replace_ttbr1(__va(pgd_phys));
memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
pgd_clear_fixmap();
memblock_free(pgd_phys, PAGE_SIZE);
@@ -599,7 +601,7 @@ void __init paging_init(void)
* We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
* allocated with it.
*/
- memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+ memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
SWAPPER_DIR_SIZE - PAGE_SIZE);
bootmem_init();
@@ -1141,6 +1143,12 @@ static inline pte_t * fixmap_pte(unsigned long addr)
return &bm_pte[pte_index(addr)];
}
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
void __init early_fixmap_init(void)
{
pgd_t *pgd;
@@ -1150,7 +1158,7 @@ void __init early_fixmap_init(void)
pgd = pgd_offset_k(addr);
if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
+ !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
/*
* We only end up here if the kernel mapping and the fixmap
* share the top level pgd entry, which should only happen on
@@ -1159,12 +1167,15 @@ void __init early_fixmap_init(void)
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
pud = pud_offset_kimg(pgd, addr);
} else {
- pgd_populate(&init_mm, pgd, bm_pud);
+ if (pgd_none(*pgd))
+ __pgd_populate(pgd, __pa_symbol(bm_pud),
+ PUD_TYPE_TABLE);
pud = fixmap_pud(addr);
}
- pud_populate(&init_mm, pud, bm_pmd);
+ if (pud_none(*pud))
+ __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
pmd = fixmap_pmd(addr);
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
+ __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
/*
* The boot-ioremap range spans multiple pmds, for which
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 58fca9ad5fcc..3446b6fb3acb 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -576,6 +576,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
+ uart_port.flags = UPF_FIXED_TYPE;
uart_port.regshift = 2;
uart_port.line = 0;
@@ -654,6 +655,10 @@ static int __init ar7_register_devices(void)
u32 val;
int res;
+ res = ar7_gpio_init();
+ if (res)
+ pr_warn("unable to register gpios: %d\n", res);
+
res = ar7_register_uarts();
if (res)
pr_err("unable to setup uart(s): %d\n", res);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index a23adc49d50f..36aabee9cba4 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,8 +246,6 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
-
- ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index d20ae63eb3c2..46abe9e4e0e0 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -330,7 +330,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
/* Verified on: WRT54GS V1.0 */
static const struct gpio_led
bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
- BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
};
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 7c26b28bf252..859cf7048347 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -54,7 +54,8 @@
.align 2; \
.type symbol, @function; \
.ent symbol, 0; \
-symbol: .frame sp, 0, ra
+symbol: .frame sp, 0, ra; \
+ .insn
/*
* NESTED - declare nested routine entry point
@@ -63,8 +64,9 @@ symbol: .frame sp, 0, ra
.globl symbol; \
.align 2; \
.type symbol, @function; \
- .ent symbol, 0; \
-symbol: .frame sp, framesize, rpc
+ .ent symbol, 0; \
+symbol: .frame sp, framesize, rpc; \
+ .insn
/*
* END - mark end of function
@@ -86,7 +88,7 @@ symbol:
#define FEXPORT(symbol) \
.globl symbol; \
.type symbol, @function; \
-symbol:
+symbol: .insn
/*
* ABS - export absolute symbol
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 6516e9da5133..b836ddec82b7 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -238,8 +238,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
#define CM_GCR_BASE_CMDEFTGT_SHF 0
#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
-#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
-#define CM_GCR_BASE_CMDEFTGT_MEM 1
+#define CM_GCR_BASE_CMDEFTGT_MEM 0
+#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 477ba026c3e5..163b3449a8de 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -49,9 +49,7 @@
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
- /* What the heck is this check doing ? */
- if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
- play_dead();
+ play_dead();
}
#endif
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 24c115a0721a..a3f38e6b7ea1 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -650,6 +650,19 @@ static const struct user_regset_view user_mips64_view = {
.n = ARRAY_SIZE(mips64_regsets),
};
+#ifdef CONFIG_MIPS32_N32
+
+static const struct user_regset_view user_mipsn32_view = {
+ .name = "mipsn32",
+ .e_flags = EF_MIPS_ABI2,
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+#endif /* CONFIG_MIPS32_N32 */
+
#endif /* CONFIG_64BIT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
@@ -661,6 +674,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
return &user_mips_view;
#endif
+#ifdef CONFIG_MIPS32_N32
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+ return &user_mipsn32_view;
+#endif
return &user_mips64_view;
#endif
}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8acae316f26b..4f9f1ae49213 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -152,6 +152,35 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add
add_memory_region(start, size, BOOT_MEM_RAM);
}
+bool __init memory_region_available(phys_addr_t start, phys_addr_t size)
+{
+ int i;
+ bool in_ram = false, free = true;
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ phys_addr_t start_, end_;
+
+ start_ = boot_mem_map.map[i].addr;
+ end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;
+
+ switch (boot_mem_map.map[i].type) {
+ case BOOT_MEM_RAM:
+ if (start >= start_ && start + size <= end_)
+ in_ram = true;
+ break;
+ case BOOT_MEM_RESERVED:
+ if ((start >= start_ && start < end_) ||
+ (start < start_ && start + size >= start_))
+ free = false;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return in_ram && free;
+}
+
static void __init print_memory_map(void)
{
int i;
@@ -300,11 +329,19 @@ static void __init bootmem_init(void)
#else /* !CONFIG_SGI_IP27 */
+static unsigned long __init bootmap_bytes(unsigned long pages)
+{
+ unsigned long bytes = DIV_ROUND_UP(pages, 8);
+
+ return ALIGN(bytes, sizeof(long));
+}
+
static void __init bootmem_init(void)
{
unsigned long reserved_end;
unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
+ bool bootmap_valid = false;
int i;
/*
@@ -385,11 +422,42 @@ static void __init bootmem_init(void)
#endif
/*
- * Initialize the boot-time allocator with low memory only.
+ * check that mapstart doesn't overlap with any of
+ * memory regions that have been reserved through eg. DTB
*/
- bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
- min_low_pfn, max_low_pfn);
+ bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
+
+ bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
+ bootmap_size);
+ for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
+ unsigned long mapstart_addr;
+
+ switch (boot_mem_map.map[i].type) {
+ case BOOT_MEM_RESERVED:
+ mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size);
+ if (PHYS_PFN(mapstart_addr) < mapstart)
+ break;
+
+ bootmap_valid = memory_region_available(mapstart_addr,
+ bootmap_size);
+ if (bootmap_valid)
+ mapstart = PHYS_PFN(mapstart_addr);
+ break;
+ default:
+ break;
+ }
+ }
+ if (!bootmap_valid)
+ panic("No memory area to place a bootmap bitmap");
+
+ /*
+ * Initialize the boot-time allocator with low memory only.
+ */
+ if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
+ min_low_pfn, max_low_pfn))
+ panic("Unexpected memory size required for bootmap");
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -438,6 +506,10 @@ static void __init bootmem_init(void)
continue;
default:
/* Not usable memory */
+ if (start > min_low_pfn && end < max_low_pfn)
+ reserve_bootmem(boot_mem_map.map[i].addr,
+ boot_mem_map.map[i].size,
+ BOOTMEM_DEFAULT);
continue;
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 7fef02a9eb85..4af08c197177 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -64,6 +64,9 @@ EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
/*
* A logcal cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
@@ -174,9 +177,12 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
- cpumask_set_cpu(cpu, &cpu_callin_map);
+ /* Notify boot CPU that we're starting & ready to sync counters */
+ complete(&cpu_starting);
+
synchronise_count_slave(cpu);
+ /* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@@ -185,6 +191,12 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
/*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up
+ */
+ complete(&cpu_running);
+
+ /*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
*/
@@ -242,22 +254,23 @@ void smp_prepare_boot_cpu(void)
{
set_cpu_possible(0, true);
set_cpu_online(0, true);
- cpumask_set_cpu(0, &cpu_callin_map);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
mp_ops->boot_secondary(cpu, tidle);
- /*
- * Trust is futile. We should really have timeouts ...
- */
- while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
- udelay(100);
- schedule();
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
}
synchronise_count_master(cpu);
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
return 0;
}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index d78178daea4b..e2fe48dd67b5 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -75,7 +75,7 @@ static struct insn insn_table_MM[] = {
{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ld, 0, 0 },
- { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
+ { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
{ insn_lld, 0, 0 },
{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 3660dc67d544..f4961bc9a61d 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -275,7 +275,7 @@ asmlinkage void plat_irq_dispatch(void)
do_IRQ(nlm_irq_to_xirq(node, i));
}
-#ifdef CONFIG_OF
+#ifdef CONFIG_CPU_XLP
static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
@@ -348,7 +348,7 @@ void __init arch_init_irq(void)
#if defined(CONFIG_CPU_XLR)
nlm_setup_fmn_irq();
#endif
-#if defined(CONFIG_OF)
+#ifdef CONFIG_CPU_XLP
of_irq_init(xlp_pic_irq_ids);
#endif
}
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 48d6349fd9d7..c5f45fc96c74 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -141,8 +141,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
FUNC("i2c", 0, 4, 2),
};
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index c6b855f7892c..9f22195b90ed 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -688,15 +688,15 @@ cas_action:
/* ELF32 Process entry path */
lws_compare_and_swap_2:
#ifdef CONFIG_64BIT
- /* Clip the input registers */
+ /* Clip the input registers. We don't need to clip %r23 as we
+ only use it for word operations */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
- depdi 0, 31, 32, %r23
#endif
/* Check the validity of the size pointer */
- subi,>>= 4, %r23, %r0
+ subi,>>= 3, %r23, %r0
b,n lws_exit_nosys
/* Jump to the functions which will load the old and new values into
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ec7b8f1e4822..c628f47a9052 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1083,11 +1083,6 @@ source "arch/powerpc/Kconfig.debug"
source "security/Kconfig"
-config KEYS_COMPAT
- bool
- depends on COMPAT && KEYS
- default y
-
source "crypto/Kconfig"
config PPC_LIB_RHEAP
diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
index 6858ec9ef295..1a953d9edf1e 100644
--- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
@@ -83,6 +83,10 @@
};
};
+ sdhc@114000 {
+ status = "disabled";
+ };
+
i2c@119000 {
status = "disabled";
};
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index cf8c7e4e0b21..984a54c85952 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -102,7 +102,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
static void do_signal(struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
- struct ksignal ksig;
+ struct ksignal ksig = { .sig = 0 };
int ret;
int is32 = is_32bit_task();
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 24f58076d49e..1d2bc84338bf 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -280,6 +280,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
*/
if (reject && reject != XICS_IPI) {
arch_spin_unlock(&ics->lock);
+ icp->n_reject++;
new_irq = reject;
goto again;
}
@@ -611,10 +612,8 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
state = &ics->irq_state[src];
/* Still asserted, resend it */
- if (state->asserted) {
- icp->n_reject++;
+ if (state->asserted)
icp_rm_deliver_irq(xics, icp, irq);
- }
if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
icp->rm_action |= XICS_RM_NOTIFY_EOI;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 60530fd93d6d..9510ddfff59b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -347,9 +347,6 @@ config COMPAT
config SYSVIPC_COMPAT
def_bool y if COMPAT && SYSVIPC
-config KEYS_COMPAT
- def_bool y if COMPAT && KEYS
-
config SMP
def_bool y
prompt "Symmetric multi-processing support"
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..2c3413b0ca52
--- /dev/null
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 649eb62c52b3..9e02cb7955c1 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 402ad6df4897..c54a9310d814 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
load_runtime_instr_cb(&runtime_instr_empty_cb);
}
-void exit_thread_runtime_instr(void);
+struct task_struct;
+
+void runtime_instr_release(struct task_struct *tsk);
#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 12d45f0cfdd9..dde6b52359c5 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -34,8 +34,8 @@ static inline void restore_access_regs(unsigned int *acrs)
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
+ update_cr_regs(next); \
if (next->mm) { \
- update_cr_regs(next); \
set_cpu_flag(CIF_FPU); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 6e72961608f0..07477ba392b7 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1549,6 +1549,7 @@ static struct s390_insn opcode_e7[] = {
{ "vfsq", 0xce, INSTR_VRR_VV000MM },
{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
{ "vftci", 0x4a, INSTR_VRI_VVIMM },
+ { "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_eb[] = {
@@ -1961,7 +1962,7 @@ void show_code(struct pt_regs *regs)
{
char *mode = user_mode(regs) ? "User" : "Krnl";
unsigned char code[64];
- char buffer[64], *ptr;
+ char buffer[128], *ptr;
mm_segment_t old_fs;
unsigned long addr;
int start, end, opsize, hops, i;
@@ -2024,7 +2025,7 @@ void show_code(struct pt_regs *regs)
start += opsize;
printk(buffer);
ptr = buffer;
- ptr += sprintf(ptr, "\n ");
+ ptr += sprintf(ptr, "\n\t ");
hops++;
}
printk("\n");
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 3c31609df959..ee7b8e7ca4f8 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -325,8 +325,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(40))
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
- if (test_facility(50) && test_facility(73))
+ if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+ __ctl_set_bit(0, 55);
+ }
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 114ee8b96f17..7bc4e4c5d5b8 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -72,7 +72,6 @@ extern void kernel_thread_starter(void);
*/
void exit_thread(void)
{
- exit_thread_runtime_instr();
}
void flush_thread(void)
@@ -87,6 +86,7 @@ void arch_release_task_struct(struct task_struct *tsk)
{
/* Free either the floating-point or the vector register save area */
kfree(tsk->thread.fpu.regs);
+ runtime_instr_release(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -137,6 +137,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+ p->thread.per_flags = 0;
/* Initialize per thread user and system timer values */
ti = task_thread_info(p);
ti->user_timer = 0;
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index fffa0e5462af..fd03a7569e10 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -18,11 +18,24 @@
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
+void runtime_instr_release(struct task_struct *tsk)
+{
+ kfree(tsk->thread.ri_cb);
+}
+
static void disable_runtime_instr(void)
{
- struct pt_regs *regs = task_pt_regs(current);
+ struct task_struct *task = current;
+ struct pt_regs *regs;
+ if (!task->thread.ri_cb)
+ return;
+ regs = task_pt_regs(task);
+ preempt_disable();
load_runtime_instr_cb(&runtime_instr_empty_cb);
+ kfree(task->thread.ri_cb);
+ task->thread.ri_cb = NULL;
+ preempt_enable();
/*
* Make sure the RI bit is deleted from the PSW. If the user did not
@@ -43,17 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->valid = 1;
}
-void exit_thread_runtime_instr(void)
-{
- struct task_struct *task = current;
-
- if (!task->thread.ri_cb)
- return;
- disable_runtime_instr();
- kfree(task->thread.ri_cb);
- task->thread.ri_cb = NULL;
-}
-
SYSCALL_DEFINE1(s390_runtime_instr, int, command)
{
struct runtime_instr_cb *cb;
@@ -62,9 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) {
- preempt_disable();
- exit_thread_runtime_instr();
- preempt_enable();
+ disable_runtime_instr();
return 0;
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index f2f6720a3331..ef0499b76c50 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -359,7 +359,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
/* End of second scan with interrupts on. */
break;
/* First scan complete, reenable interrupts. */
- zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
+ break;
si = 0;
continue;
}
@@ -921,7 +922,7 @@ static int __init pci_base_init(void)
if (!s390_pci_probe)
return 0;
- if (!test_facility(69) || !test_facility(71) || !test_facility(72))
+ if (!test_facility(69) || !test_facility(71))
return 0;
rc = zpci_debug_init();
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 10ca15dcab11..bc065392f7ab 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/processor.h>
@@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
}
/* Set Interruption Controls */
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{
+ if (!test_facility(72))
+ return -EIO;
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+ return 0;
}
/* PCI Load */
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 538c10db3537..8dc315b212c2 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -165,7 +165,6 @@ static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_TE | SCSCR_RE,
.type = PORT_IRDA,
.ops = &sh770x_sci_port_ops,
- .regshift = 1,
};
static struct resource scif2_resources[] = {
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 894bcaed002e..1cf6a15102d8 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -550,9 +550,6 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-config KEYS_COMPAT
- def_bool y if COMPAT && KEYS
-
endmenu
source "net/Kconfig"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8dc3b07ee3cc..f37e01e6b7f2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2657,10 +2657,6 @@ config COMPAT_FOR_U64_ALIGNMENT
config SYSVIPC_COMPAT
def_bool y
depends on SYSVIPC
-
-config KEYS_COMPAT
- def_bool y
- depends on KEYS
endif
endmenu
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
index 85c4e1cf7172..e1693457c178 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
@@ -174,8 +174,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -195,8 +195,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -260,8 +260,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 08b1f2f6ea50..c9e6eab2075b 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,6 +3,7 @@
#include <asm/fpu/api.h>
#include <asm/pgtable.h>
+#include <asm/tlb.h>
/*
* We map the EFI regions needed for runtime services non-contiguously,
@@ -66,6 +67,17 @@ extern u64 asmlinkage efi_call(void *fp, ...);
#define efi_call_phys(f, args...) efi_call((f), args)
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+ u64 r15;
+ u64 prev_cr3;
+ pgd_t *efi_pgt;
+ bool use_pgd;
+ u64 phys_stack;
+} __packed;
+
#define efi_call_virt(f, ...) \
({ \
efi_status_t __s; \
@@ -73,7 +85,20 @@ extern u64 asmlinkage efi_call(void *fp, ...);
efi_sync_low_kernel_mappings(); \
preempt_disable(); \
__kernel_fpu_begin(); \
+ \
+ if (efi_scratch.use_pgd) { \
+ efi_scratch.prev_cr3 = read_cr3(); \
+ write_cr3((unsigned long)efi_scratch.efi_pgt); \
+ __flush_tlb_all(); \
+ } \
+ \
__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
+ \
+ if (efi_scratch.use_pgd) { \
+ write_cr3(efi_scratch.prev_cr3); \
+ __flush_tlb_all(); \
+ } \
+ \
__kernel_fpu_end(); \
preempt_enable(); \
__s; \
@@ -113,6 +138,7 @@ extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
+extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 19d14ac23ef9..fc3c7e49c8e4 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
+ bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
struct x86_exception exception;
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 91dfcafe27a6..bad25bb80679 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
asmlinkage long sys_iopl(unsigned int);
/* kernel/ldt.c */
-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
+asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
asmlinkage long sys_rt_sigreturn(void);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 7402eb4b509d..6a07c05956a6 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
+#include <linux/preempt.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
@@ -66,6 +67,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task())
+#else
+# define WARN_ON_IN_IRQ()
+#endif
+
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
@@ -86,8 +93,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) \
- likely(!__range_not_ok(addr, size, user_addr_max()))
+#define access_ok(type, addr, size) \
+({ \
+ WARN_ON_IN_IRQ(); \
+ likely(!__range_not_ok(addr, size, user_addr_max())); \
+})
/*
* The exception table consists of pairs of addresses relative to the
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index ac8975a65280..abf581ade8d2 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -990,6 +990,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
return 0;
}
+static bool is_blacklisted(unsigned int cpu)
+{
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->x86 == 6 && c->x86_model == 79) {
+ pr_err_once("late loading on model 79 is disabled.\n");
+ return true;
+ }
+
+ return false;
+}
+
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
bool refresh_fw)
{
@@ -998,6 +1010,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
const struct firmware *firmware;
enum ucode_state ret;
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
sprintf(name, "intel-ucode/%02x-%02x-%02x",
c->x86, c->x86_model, c->x86_mask);
@@ -1022,6 +1037,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 5f8f0b3cc674..2c0b0b645a74 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -26,7 +26,7 @@
#include "common.h"
static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, unsigned long orig_ip)
{
/*
@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
__this_cpu_write(current_kprobe, NULL);
if (orig_ip)
regs->ip = orig_ip;
- return 1;
}
int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- if (kprobe_ftrace(p))
- return __skip_singlestep(p, regs, kcb, 0);
- else
- return 0;
+ if (kprobe_ftrace(p)) {
+ __skip_singlestep(p, regs, kcb, 0);
+ preempt_enable_no_resched();
+ return 1;
+ }
+ return 0;
}
NOKPROBE_SYMBOL(skip_singlestep);
-/* Ftrace callback handler for kprobes */
+/* Ftrace callback handler for kprobes -- called under preepmt disabed */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
+ /* To emulate trap based kprobes, preempt_disable here */
+ preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs))
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
__skip_singlestep(p, regs, kcb, orig_ip);
+ preempt_enable_no_resched();
+ }
/*
* If pre_handler returns !0, it sets regs->ip and
- * resets current kprobe.
+ * resets current kprobe, and keep preempt count +1.
*/
}
end:
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 6acc9dd91f36..d6279593bcdd 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -271,8 +272,8 @@ out:
return error;
}
-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
- unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+ unsigned long , bytecount)
{
int ret = -ENOSYS;
@@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
ret = write_ldt(ptr, bytecount, 0);
break;
}
- return ret;
+ /*
+ * The SYSCALL_DEFINE() macros give us an 'unsigned long'
+ * return type, but tht ABI for sys_modify_ldt() expects
+ * 'int'. This cast gives us an int-sized value in %rax
+ * for the return code. The 'unsigned' is necessary so
+ * the compiler does not try to sign-extend the negative
+ * return codes into the high half of the register when
+ * taking the value from int->long.
+ */
+ return (unsigned int)ret;
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 04b2f3cad7ba..684edebb4a0c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2726,6 +2726,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
+ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
return X86EMUL_CONTINUE;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 899c40f826dd..4b1152e57340 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1696,6 +1696,8 @@ static int ud_interception(struct vcpu_svm *svm)
int er;
er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+ if (er == EMULATE_USER_EXIT)
+ return 0;
if (er != EMULATE_DONE)
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
@@ -3114,6 +3116,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u32 ecx = msr->index;
u64 data = msr->data;
switch (ecx) {
+ case MSR_IA32_CR_PAT:
+ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ return 1;
+ vcpu->arch.pat = data;
+ svm->vmcb->save.g_pat = data;
+ mark_dirty(svm->vmcb, VMCB_NPT);
+ break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9114588e3e61..253a8c8207bb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5267,6 +5267,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
return 1;
}
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+ if (er == EMULATE_USER_EXIT)
+ return 0;
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
@@ -10394,6 +10396,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+ vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+ vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8e526c6fd784..df81717a92f3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1812,6 +1812,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+ if (guest_hv_clock.version & 1)
+ ++guest_hv_clock.version; /* first time write, random junk */
+
vcpu->hv_clock.version = guest_hv_clock.version + 1;
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
@@ -5095,6 +5098,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
ctxt->eflags = kvm_get_rflags(vcpu);
+ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
ctxt->eip = kvm_rip_read(vcpu);
ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
@@ -5315,37 +5320,26 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
return dr6;
}
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
{
struct kvm_run *kvm_run = vcpu->run;
- /*
- * rflags is the old, "raw" value of the flags. The new value has
- * not been saved yet.
- *
- * This is correct even for TF set by the guest, because "the
- * processor will not generate this exception after the instruction
- * that sets the TF flag".
- */
- if (unlikely(rflags & X86_EFLAGS_TF)) {
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
- DR6_RTM;
- kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
- kvm_run->debug.arch.exception = DB_VECTOR;
- kvm_run->exit_reason = KVM_EXIT_DEBUG;
- *r = EMULATE_USER_EXIT;
- } else {
- vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
- /*
- * "Certain debug exceptions may clear bit 0-3. The
- * remaining contents of the DR6 register are never
- * cleared by the processor".
- */
- vcpu->arch.dr6 &= ~15;
- vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
- kvm_queue_exception(vcpu, DB_VECTOR);
- }
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+ kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+ kvm_run->debug.arch.exception = DB_VECTOR;
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ *r = EMULATE_USER_EXIT;
+ } else {
+ vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
+ /*
+ * "Certain debug exceptions may clear bit 0-3. The
+ * remaining contents of the DR6 register are never
+ * cleared by the processor".
+ */
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+ kvm_queue_exception(vcpu, DB_VECTOR);
}
}
@@ -5435,6 +5429,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
+ if (ctxt->have_exception && inject_emulated_exception(vcpu))
+ return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
@@ -5500,8 +5496,9 @@ restart:
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE)
- kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+ if (r == EMULATE_DONE &&
+ (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
__kvm_set_rflags(vcpu, ctxt->eflags);
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index d388de72eaca..ec039f2a0c13 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -833,7 +833,7 @@ EndTable
GrpTable: Grp3_1
0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
2: NOT Eb
3: NEG Eb
4: MUL AL,Eb
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4540e8880cd9..1924bba0f3af 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -911,15 +911,10 @@ static void populate_pte(struct cpa_data *cpa,
pte = pte_offset_kernel(pmd, start);
while (num_pages-- && start < end) {
-
- /* deal with the NX bit */
- if (!(pgprot_val(pgprot) & _PAGE_NX))
- cpa->pfn &= ~_PAGE_NX;
-
- set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+ set_pte(pte, pfn_pte(cpa->pfn, pgprot));
start += PAGE_SIZE;
- cpa->pfn += PAGE_SIZE;
+ cpa->pfn++;
pte++;
}
}
@@ -975,11 +970,11 @@ static int populate_pmd(struct cpa_data *cpa,
pmd = pmd_offset(pud, start);
- set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+ set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
massage_pgprot(pmd_pgprot)));
start += PMD_SIZE;
- cpa->pfn += PMD_SIZE;
+ cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
cur_pages += PMD_SIZE >> PAGE_SHIFT;
}
@@ -1048,11 +1043,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
* Map everything starting from the Gb boundary, possibly with 1G pages
*/
while (end - start >= PUD_SIZE) {
- set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+ set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
massage_pgprot(pud_pgprot)));
start += PUD_SIZE;
- cpa->pfn += PUD_SIZE;
+ cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
cur_pages += PUD_SIZE >> PAGE_SHIFT;
pud++;
}
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index d90528ea5412..12c051d19e4b 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15) {
+ if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index ea48449b2e63..64fbc7e33226 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -28,8 +28,7 @@ struct bmp_header {
void __init efi_bgrt_init(void)
{
acpi_status status;
- void __iomem *image;
- bool ioremapped = false;
+ void *image;
struct bmp_header bmp_header;
if (acpi_disabled)
@@ -70,20 +69,14 @@ void __init efi_bgrt_init(void)
return;
}
- image = efi_lookup_mapped_addr(bgrt_tab->image_address);
+ image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
if (!image) {
- image = early_ioremap(bgrt_tab->image_address,
- sizeof(bmp_header));
- ioremapped = true;
- if (!image) {
- pr_err("Ignoring BGRT: failed to map image header memory\n");
- return;
- }
+ pr_err("Ignoring BGRT: failed to map image header memory\n");
+ return;
}
- memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
- if (ioremapped)
- early_iounmap(image, sizeof(bmp_header));
+ memcpy(&bmp_header, image, sizeof(bmp_header));
+ memunmap(image);
bgrt_image_size = bmp_header.size;
bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
@@ -93,18 +86,14 @@ void __init efi_bgrt_init(void)
return;
}
- if (ioremapped) {
- image = early_ioremap(bgrt_tab->image_address,
- bmp_header.size);
- if (!image) {
- pr_err("Ignoring BGRT: failed to map image memory\n");
- kfree(bgrt_image);
- bgrt_image = NULL;
- return;
- }
+ image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
+ if (!image) {
+ pr_err("Ignoring BGRT: failed to map image memory\n");
+ kfree(bgrt_image);
+ bgrt_image = NULL;
+ return;
}
- memcpy_fromio(bgrt_image, image, bgrt_image_size);
- if (ioremapped)
- early_iounmap(image, bmp_header.size);
+ memcpy(bgrt_image, image, bgrt_image_size);
+ memunmap(image);
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index ad285404ea7f..3c1f3cd7b2ba 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -869,7 +869,7 @@ static void __init kexec_enter_virtual_mode(void)
* This function will switch the EFI runtime services to virtual mode.
* Essentially, we look through the EFI memmap and map every region that
* has the runtime attribute bit set in its memory descriptor into the
- * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ * efi_pgd page table.
*
* The old method which used to update that memory descriptor with the
* virtual address obtained from ioremap() is still supported when the
@@ -879,8 +879,8 @@ static void __init kexec_enter_virtual_mode(void)
*
* The new method does a pagetable switch in a preemption-safe manner
* so that we're in a different address space when calling a runtime
- * function. For function arguments passing we do copy the PGDs of the
- * kernel page table into ->trampoline_pgd prior to each call.
+ * function. For function arguments passing we do copy the PUDs of the
+ * kernel page table into efi_pgd prior to each call.
*
* Specially for kexec boot, efi runtime maps in previous kernel should
* be passed in via setup_data. In that case runtime ranges will be mapped
@@ -895,6 +895,12 @@ static void __init __efi_enter_virtual_mode(void)
efi.systab = NULL;
+ if (efi_alloc_page_tables()) {
+ pr_err("Failed to allocate EFI page tables\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
efi_merge_regions();
new_memmap = efi_map_regions(&count, &pg_shift);
if (!new_memmap) {
@@ -954,28 +960,11 @@ static void __init __efi_enter_virtual_mode(void)
efi_runtime_mkexec();
/*
- * We mapped the descriptor array into the EFI pagetable above but we're
- * not unmapping it here. Here's why:
- *
- * We're copying select PGDs from the kernel page table to the EFI page
- * table and when we do so and make changes to those PGDs like unmapping
- * stuff from them, those changes appear in the kernel page table and we
- * go boom.
- *
- * From setup_real_mode():
- *
- * ...
- * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
- *
- * In this particular case, our allocation is in PGD 0 of the EFI page
- * table but we've copied that PGD from PGD[272] of the EFI page table:
- *
- * pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
- *
- * where the direct memory mapping in kernel space is.
- *
- * new_memmap's VA comes from that direct mapping and thus clearing it,
- * it would get cleared in the kernel page table too.
+ * We mapped the descriptor array into the EFI pagetable above
+ * but we're not unmapping it here because if we're running in
+ * EFI mixed mode we need all of memory to be accessible when
+ * we pass parameters to the EFI runtime services in the
+ * thunking code.
*
* efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
*/
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index ed5b67338294..58d669bc8250 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -38,6 +38,11 @@
* say 0 - 3G.
*/
+int __init efi_alloc_page_tables(void)
+{
+ return 0;
+}
+
void efi_sync_low_kernel_mappings(void) {}
void __init efi_dump_pagetable(void) {}
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index a0ac0f9c307f..18dfaad71c99 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -40,6 +40,7 @@
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
+#include <asm/pgalloc.h>
/*
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
@@ -47,16 +48,7 @@
*/
static u64 efi_va = EFI_VA_START;
-/*
- * Scratch space used for switching the pagetable in the EFI stub
- */
-struct efi_scratch {
- u64 r15;
- u64 prev_cr3;
- pgd_t *efi_pgt;
- bool use_pgd;
- u64 phys_stack;
-} __packed;
+struct efi_scratch efi_scratch;
static void __init early_code_mapping_set_exec(int executable)
{
@@ -83,8 +75,11 @@ pgd_t * __init efi_call_phys_prolog(void)
int pgd;
int n_pgds;
- if (!efi_enabled(EFI_OLD_MEMMAP))
- return NULL;
+ if (!efi_enabled(EFI_OLD_MEMMAP)) {
+ save_pgd = (pgd_t *)read_cr3();
+ write_cr3((unsigned long)efi_scratch.efi_pgt);
+ goto out;
+ }
early_code_mapping_set_exec(1);
@@ -96,6 +91,7 @@ pgd_t * __init efi_call_phys_prolog(void)
vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
}
+out:
__flush_tlb_all();
return save_pgd;
@@ -109,8 +105,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
int pgd_idx;
int nr_pgds;
- if (!save_pgd)
+ if (!efi_enabled(EFI_OLD_MEMMAP)) {
+ write_cr3((unsigned long)save_pgd);
+ __flush_tlb_all();
return;
+ }
nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
@@ -123,27 +122,97 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
early_code_mapping_set_exec(0);
}
+static pgd_t *efi_pgd;
+
+/*
+ * We need our own copy of the higher levels of the page tables
+ * because we want to avoid inserting EFI region mappings (EFI_VA_END
+ * to EFI_VA_START) into the standard kernel page tables. Everything
+ * else can be shared, see efi_sync_low_kernel_mappings().
+ */
+int __init efi_alloc_page_tables(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ gfp_t gfp_mask;
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return 0;
+
+ gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+ efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+ if (!efi_pgd)
+ return -ENOMEM;
+
+ pgd = efi_pgd + pgd_index(EFI_VA_END);
+
+ pud = pud_alloc_one(NULL, 0);
+ if (!pud) {
+ free_page((unsigned long)efi_pgd);
+ return -ENOMEM;
+ }
+
+ pgd_populate(NULL, pgd, pud);
+
+ return 0;
+}
+
/*
* Add low kernel mappings for passing arguments to EFI functions.
*/
void efi_sync_low_kernel_mappings(void)
{
- unsigned num_pgds;
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+ unsigned num_entries;
+ pgd_t *pgd_k, *pgd_efi;
+ pud_t *pud_k, *pud_efi;
if (efi_enabled(EFI_OLD_MEMMAP))
return;
- num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+ /*
+ * We can share all PGD entries apart from the one entry that
+ * covers the EFI runtime mapping space.
+ *
+ * Make sure the EFI runtime region mappings are guaranteed to
+ * only span a single PGD entry and that the entry also maps
+ * other important kernel regions.
+ */
+ BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
+ BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
+ (EFI_VA_END & PGDIR_MASK));
+
+ pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
+ pgd_k = pgd_offset_k(PAGE_OFFSET);
- memcpy(pgd + pgd_index(PAGE_OFFSET),
- init_mm.pgd + pgd_index(PAGE_OFFSET),
- sizeof(pgd_t) * num_pgds);
+ num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
+ memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
+
+ /*
+ * We share all the PUD entries apart from those that map the
+ * EFI regions. Copy around them.
+ */
+ BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
+ BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
+
+ pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
+ pud_efi = pud_offset(pgd_efi, 0);
+
+ pgd_k = pgd_offset_k(EFI_VA_END);
+ pud_k = pud_offset(pgd_k, 0);
+
+ num_entries = pud_index(EFI_VA_END);
+ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+
+ pud_efi = pud_offset(pgd_efi, EFI_VA_START);
+ pud_k = pud_offset(pgd_k, EFI_VA_START);
+
+ num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
+ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
- unsigned long text;
+ unsigned long pfn, text;
struct page *page;
unsigned npages;
pgd_t *pgd;
@@ -151,8 +220,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
- efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
- pgd = __va(efi_scratch.efi_pgt);
+ efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+ pgd = efi_pgd;
/*
* It can happen that the physical address of new_memmap lands in memory
@@ -160,7 +229,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
* and ident-map those pages containing the map before calling
* phys_efi_set_virtual_address_map().
*/
- if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+ pfn = pa_memmap >> PAGE_SHIFT;
+ if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
return 1;
}
@@ -185,8 +255,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
npages = (_end - _text) >> PAGE_SHIFT;
text = __pa(_text);
+ pfn = text >> PAGE_SHIFT;
- if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+ if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
pr_err("Failed to map kernel text 1:1\n");
return 1;
}
@@ -196,20 +267,20 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
- kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
+ kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
}
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
- unsigned long pf = 0;
+ unsigned long flags = 0;
+ unsigned long pfn;
+ pgd_t *pgd = efi_pgd;
if (!(md->attribute & EFI_MEMORY_WB))
- pf |= _PAGE_PCD;
+ flags |= _PAGE_PCD;
- if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+ pfn = md->phys_addr >> PAGE_SHIFT;
+ if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
md->phys_addr, va);
}
@@ -312,9 +383,7 @@ void __init efi_runtime_mkexec(void)
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
- ptdump_walk_pgd_level(NULL, pgd);
+ ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 86d0f9e08dd9..32020cb8bb08 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -38,41 +38,6 @@
mov %rsi, %cr0; \
mov (%rsp), %rsp
- /* stolen from gcc */
- .macro FLUSH_TLB_ALL
- movq %r15, efi_scratch(%rip)
- movq %r14, efi_scratch+8(%rip)
- movq %cr4, %r15
- movq %r15, %r14
- andb $0x7f, %r14b
- movq %r14, %cr4
- movq %r15, %cr4
- movq efi_scratch+8(%rip), %r14
- movq efi_scratch(%rip), %r15
- .endm
-
- .macro SWITCH_PGT
- cmpb $0, efi_scratch+24(%rip)
- je 1f
- movq %r15, efi_scratch(%rip) # r15
- # save previous CR3
- movq %cr3, %r15
- movq %r15, efi_scratch+8(%rip) # prev_cr3
- movq efi_scratch+16(%rip), %r15 # EFI pgt
- movq %r15, %cr3
- 1:
- .endm
-
- .macro RESTORE_PGT
- cmpb $0, efi_scratch+24(%rip)
- je 2f
- movq efi_scratch+8(%rip), %r15
- movq %r15, %cr3
- movq efi_scratch(%rip), %r15
- FLUSH_TLB_ALL
- 2:
- .endm
-
ENTRY(efi_call)
SAVE_XMM
mov (%rsp), %rax
@@ -83,16 +48,8 @@ ENTRY(efi_call)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
- SWITCH_PGT
call *%rdi
- RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
ret
ENDPROC(efi_call)
-
- .data
-ENTRY(efi_scratch)
- .fill 3,8,0
- .byte 0
- .quad 0
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 836a1eb5df43..3ee234b6234d 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
@@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
mm->arch.ldt.entry_count = 0;
}
-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+ unsigned long , bytecount)
{
- return do_modify_ldt_skas(func, ptr, bytecount);
+ /* See non-um modify_ldt() for why we do this cast */
+ return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 248d1a8f9409..3240d394426c 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -361,7 +361,6 @@ config CRYPTO_XTS
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
- select CRYPTO_ECB
help
XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
key size 256, 384 or 512 bits. This implementation currently
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 2516e97c58f1..5e5a8adac0ba 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -87,7 +87,7 @@ EXPORT_SYMBOL_GPL(pkcs7_free_message);
static int pkcs7_check_authattrs(struct pkcs7_message *msg)
{
struct pkcs7_signed_info *sinfo;
- bool want;
+ bool want = false;
sinfo = msg->signed_infos;
if (!sinfo)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b3b27b86955d..4051a164c2eb 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -212,4 +212,6 @@ source "drivers/bif/Kconfig"
source "drivers/sensors/Kconfig"
+source "drivers/tee/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 2545cf95e8db..d7c1d7422e86 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -182,3 +182,4 @@ obj-$(CONFIG_BIF) += bif/
obj-$(CONFIG_SENSORS_SSC) += sensors/
obj-$(CONFIG_ESOC) += esoc/
+obj-$(CONFIG_TEE) += tee/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 2c33b1251afb..2106014f1ea8 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -833,7 +833,7 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
- * binder_enqueue_thread_work_ilocked_nowake() - Add thread work
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
* @thread: thread to queue work to
* @work: struct binder_work to add to list
*
@@ -844,8 +844,8 @@ binder_enqueue_work_ilocked(struct binder_work *work,
* Requires the proc->inner_lock to be held.
*/
static void
-binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread,
- struct binder_work *work)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
{
binder_enqueue_work_ilocked(work, &thread->todo);
}
@@ -2468,7 +2468,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
debug_id, (u64)fda->num_fds);
continue;
}
- fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
task_close_fd(proc, fd_array[fd_index]);
} break;
@@ -2692,7 +2692,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
*/
parent_buffer = parent->buffer -
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
- fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
@@ -2758,7 +2758,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
proc->pid, thread->pid);
return -EINVAL;
}
- parent_buffer = (u8 *)(parent->buffer -
+ parent_buffer = (u8 *)((uintptr_t)parent->buffer -
binder_alloc_get_user_buffer_offset(
&target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
@@ -3348,7 +3348,14 @@ static void binder_transaction(struct binder_proc *proc,
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
- binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete);
+ /*
+ * Defer the TRANSACTION_COMPLETE, so we don't return to
+ * userspace immediately; this allows the target process to
+ * immediately start processing this transaction, reducing
+ * latency. We will then return the TRANSACTION_COMPLETE when
+ * the target replies (or there is an error).
+ */
+ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6aaa3f81755b..c2ba811993d4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -272,6 +272,7 @@ config SATA_SX4
config ATA_BMDMA
bool "ATA BMDMA support"
+ depends on HAS_DMA
default y
help
This option adds support for SFF ATA controllers with BMDMA
@@ -318,6 +319,7 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
+ depends on HAS_DMA
depends on ARCH_HIGHBANK || COMPILE_TEST
help
This option enables support for the Calxeda Highbank SoC's
@@ -327,6 +329,7 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
+ depends on HAS_DMA
depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
select GENERIC_PHY
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 91a9e6af2ec4..75cced210b2a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2245,8 +2245,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
eflags |= ATA_EFLAG_DUBIOUS_XFER;
ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+ trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
}
- trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
DPRINTK("EXIT\n");
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 433b60092972..e40f67b7d28b 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -1936,6 +1936,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
if (ret) {
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
+ of_node_put(np);
goto free_table;
}
}
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 404d94c6c8bc..feba1b211898 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
struct wake_irq *wirq = _wirq;
int res;
+ /* Maybe abort suspend? */
+ if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+ pm_wakeup_event(wirq->dev, 0);
+
+ return IRQ_HANDLED;
+ }
+
/* We don't want RPM_ASYNC or RPM_NOWAIT here */
res = pm_runtime_resume(wirq->dev);
if (res < 0)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 55a8671f1979..80455f70ff79 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2736,7 +2736,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@@ -2863,7 +2863,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages))
return PTR_ERR(pages);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 33e23a7a691f..a295ad6a1674 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1407,33 +1407,34 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st)
{
- struct blkif_response resp;
+ struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings = &blkif->blk_rings;
int notify;
- resp.id = id;
- resp.operation = op;
- resp.status = st;
-
spin_lock_irqsave(&blkif->blk_ring_lock, flags);
/* Place on the response ring for the relevant domain. */
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
+
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index c929ae22764c..04cfee719334 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -74,9 +74,8 @@ extern unsigned int xen_blkif_max_ring_order;
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};
+
+/* i386 protocol version */
struct blkif_x86_32_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -128,14 +127,6 @@ struct blkif_x86_32_request {
} u;
} __attribute__((__packed__));
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
@@ -192,18 +183,12 @@ struct blkif_x86_64_request {
} u;
} __attribute__((__packed__));
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
- struct blkif_common_response);
+ struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
- struct blkif_x86_32_response);
+ struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
- struct blkif_x86_64_response);
+ struct blkif_response);
union blkif_back_rings {
struct blkif_back_ring native;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7bb8055bd10c..1ccad79ce77c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2969,6 +2969,12 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+
+ /* QCA Rome devices lose their updated firmware over suspend,
+ * but the USB hub doesn't notice any status change.
+ * Explicitly request a device reset on resume.
+ */
+ set_bit(BTUSB_RESET_RESUME, &data->flags);
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 614ce7b7d5a6..5737da9d855a 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -111,11 +111,11 @@ static inline uint64_t buf_page_offset(uint64_t buf)
return offset;
}
-static inline int buf_num_pages(uint64_t buf, ssize_t len)
+static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
- int nPages = end - start + 1;
+ uint64_t nPages = end - start + 1;
return nPages;
}
@@ -144,7 +144,7 @@ struct fastrpc_buf {
struct fastrpc_file *fl;
void *virt;
uint64_t phys;
- ssize_t size;
+ size_t size;
};
struct fastrpc_ctx_lst;
@@ -170,7 +170,7 @@ struct smq_invoke_ctx {
unsigned *attrs;
struct fastrpc_mmap **maps;
struct fastrpc_buf *buf;
- ssize_t used;
+ size_t used;
struct fastrpc_file *fl;
uint32_t sc;
struct overlap *overs;
@@ -259,9 +259,9 @@ struct fastrpc_mmap {
struct dma_buf_attachment *attach;
struct ion_handle *handle;
uint64_t phys;
- ssize_t size;
+ size_t size;
uintptr_t va;
- ssize_t len;
+ size_t len;
int refs;
uintptr_t raddr;
int uncached;
@@ -347,7 +347,7 @@ static inline int64_t getnstimediff(struct timespec *start)
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
- struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
+ struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
int vmid;
if (!fl)
@@ -382,7 +382,8 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)
struct fastrpc_buf *buf, *free;
do {
struct hlist_node *n;
- free = 0;
+
+ free = NULL;
spin_lock(&fl->hlock);
hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
hlist_del_init(&buf->hn);
@@ -414,11 +415,14 @@ static void fastrpc_mmap_add(struct fastrpc_mmap *map)
}
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
- ssize_t len, int mflags, struct fastrpc_mmap **ppmap)
+ size_t len, int mflags, struct fastrpc_mmap **ppmap)
{
struct fastrpc_apps *me = &gfa;
- struct fastrpc_mmap *match = 0, *map;
+ struct fastrpc_mmap *match = NULL, *map = NULL;
struct hlist_node *n;
+
+ if ((va + len) < va)
+ return -EOVERFLOW;
if (mflags == ADSP_MMAP_HEAP_ADDR ||
mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock(&me->hlock);
@@ -452,10 +456,10 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
return -ENOTTY;
}
-static int dma_alloc_memory(phys_addr_t *region_start, ssize_t size)
+static int dma_alloc_memory(phys_addr_t *region_start, size_t size)
{
struct fastrpc_apps *me = &gfa;
- void *vaddr = 0;
+ void *vaddr = NULL;
DEFINE_DMA_ATTRS(attrs);
if (me->dev == NULL) {
@@ -475,9 +479,9 @@ static int dma_alloc_memory(phys_addr_t *region_start, ssize_t size)
}
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
- ssize_t len, struct fastrpc_mmap **ppmap)
+ size_t len, struct fastrpc_mmap **ppmap)
{
- struct fastrpc_mmap *match = 0, *map;
+ struct fastrpc_mmap *match = NULL, *map;
struct hlist_node *n;
struct fastrpc_apps *me = &gfa;
@@ -595,14 +599,14 @@ static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
struct fastrpc_session_ctx **session);
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
- uintptr_t va, ssize_t len, int mflags, struct fastrpc_mmap **ppmap)
+ uintptr_t va, size_t len, int mflags, struct fastrpc_mmap **ppmap)
{
struct fastrpc_apps *me = &gfa;
struct fastrpc_session_ctx *sess;
struct fastrpc_apps *apps = fl->apps;
int cid = fl->cid;
struct fastrpc_channel_ctx *chan = &apps->channel[cid];
- struct fastrpc_mmap *map = 0;
+ struct fastrpc_mmap *map = NULL;
struct dma_attrs attrs;
phys_addr_t region_start = 0;
unsigned long flags;
@@ -623,13 +627,13 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
if (mflags == ADSP_MMAP_HEAP_ADDR ||
mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
map->apps = me;
- map->fl = 0;
+ map->fl = NULL;
VERIFY(err, !dma_alloc_memory(&region_start, len));
if (err)
goto bail;
map->phys = (uintptr_t)region_start;
map->size = len;
- map->va = map->phys;
+ map->va = (uintptr_t)map->phys;
} else {
VERIFY(err, !IS_ERR_OR_NULL(map->handle =
ion_import_dma_buf(fl->apps->client, fd)));
@@ -727,11 +731,11 @@ bail:
return err;
}
-static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
+static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
struct fastrpc_buf **obuf)
{
int err = 0, vmid;
- struct fastrpc_buf *buf = 0, *fr = 0;
+ struct fastrpc_buf *buf = NULL, *fr = NULL;
struct hlist_node *n;
VERIFY(err, size > 0);
@@ -751,13 +755,13 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
*obuf = fr;
return 0;
}
- buf = 0;
- VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
+ buf = NULL;
+ VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
if (err)
goto bail;
INIT_HLIST_NODE(&buf->hn);
buf->fl = fl;
- buf->virt = 0;
+ buf->virt = NULL;
buf->phys = 0;
buf->size = size;
buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
@@ -799,7 +803,7 @@ static int context_restore_interrupted(struct fastrpc_file *fl,
struct smq_invoke_ctx **po)
{
int err = 0;
- struct smq_invoke_ctx *ctx = 0, *ictx = 0;
+ struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
struct hlist_node *n;
struct fastrpc_ioctl_invoke *invoke = &inv->inv;
spin_lock(&fl->hlock);
@@ -852,7 +856,7 @@ static int context_build_overlap(struct smq_invoke_ctx *ctx)
ctx->overs[i].raix = i;
ctx->overps[i] = &ctx->overs[i];
}
- sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
+ sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
max.start = 0;
max.end = 0;
for (i = 0; i < nbufs; ++i) {
@@ -881,7 +885,8 @@ bail:
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
do {\
if (!(kernel))\
- VERIFY(err, 0 == copy_from_user((dst), (src),\
+ VERIFY(err, 0 == copy_from_user((dst),\
+ (void const __user *)(src),\
(size)));\
else\
memmove((dst), (src), (size));\
@@ -890,8 +895,8 @@ bail:
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
do {\
if (!(kernel))\
- VERIFY(err, 0 == copy_to_user((dst), (src),\
- (size)));\
+ VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
+ (src), (size)));\
else\
memmove((dst), (src), (size));\
} while (0)
@@ -904,7 +909,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
struct smq_invoke_ctx **po)
{
int err = 0, bufs, size = 0;
- struct smq_invoke_ctx *ctx = 0;
+ struct smq_invoke_ctx *ctx = NULL;
struct fastrpc_ctx_lst *clst = &fl->clst;
struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
@@ -915,7 +920,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
sizeof(*ctx->overs) * (bufs) +
sizeof(*ctx->overps) * (bufs);
- VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
+ VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
if (err)
goto bail;
@@ -929,7 +934,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
- K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
+ K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
bufs * sizeof(*ctx->lpra));
if (err)
goto bail;
@@ -1039,10 +1044,10 @@ static void context_list_ctor(struct fastrpc_ctx_lst *me)
static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
struct fastrpc_ctx_lst *clst = &fl->clst;
- struct smq_invoke_ctx *ictx = 0, *ctxfree;
+ struct smq_invoke_ctx *ictx = NULL, *ctxfree;
struct hlist_node *n;
do {
- ctxfree = 0;
+ ctxfree = NULL;
spin_lock(&fl->hlock);
hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
hlist_del_init(&ictx->hn);
@@ -1054,7 +1059,7 @@ static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
context_free(ctxfree);
} while (ctxfree);
do {
- ctxfree = 0;
+ ctxfree = NULL;
spin_lock(&fl->hlock);
hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
hlist_del_init(&ictx->hn);
@@ -1073,7 +1078,7 @@ static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
struct fastrpc_file *fl, *free;
struct hlist_node *n;
do {
- free = 0;
+ free = NULL;
spin_lock(&me->hlock);
hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
hlist_del_init(&fl->hn);
@@ -1097,20 +1102,20 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
int bufs = inbufs + outbufs;
uintptr_t args;
- ssize_t rlen = 0, copylen = 0, metalen = 0;
+ size_t rlen = 0, copylen = 0, metalen = 0;
int i, inh, oix;
int err = 0;
int mflags = 0;
/* calculate size of the metadata */
- rpra = 0;
+ rpra = NULL;
list = smq_invoke_buf_start(rpra, sc);
pages = smq_phy_page_start(sc, list);
ipage = pages;
for (i = 0; i < bufs; ++i) {
uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
- ssize_t len = lpra[i].buf.len;
+ size_t len = lpra[i].buf.len;
if (ctx->fds[i] && (ctx->fds[i] != -1))
fastrpc_mmap_create(ctx->fl, ctx->fds[i],
@@ -1118,12 +1123,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
mflags, &ctx->maps[i]);
ipage += 1;
}
- metalen = copylen = (ssize_t)&ipage[0];
+ metalen = copylen = (size_t)&ipage[0];
/* calculate len requreed for copying */
for (oix = 0; oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
uintptr_t mstart, mend;
- ssize_t len = lpra[i].buf.len;
+ size_t len = lpra[i].buf.len;
+
if (!len)
continue;
if (ctx->maps[i])
@@ -1159,7 +1165,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
ipage = pages;
args = (uintptr_t)ctx->buf->virt + metalen;
for (i = 0; i < bufs; ++i) {
- ssize_t len = lpra[i].buf.len;
+ size_t len = lpra[i].buf.len;
list[i].num = 0;
list[i].pgidx = 0;
if (!len)
@@ -1173,7 +1179,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
for (i = 0; i < inbufs + outbufs; ++i) {
struct fastrpc_mmap *map = ctx->maps[i];
uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
- ssize_t len = lpra[i].buf.len;
+ size_t len = lpra[i].buf.len;
rpra[i].buf.pv = 0;
rpra[i].buf.len = len;
if (!len)
@@ -1181,7 +1187,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
if (map) {
struct vm_area_struct *vma;
uintptr_t offset;
- int num = buf_num_pages(buf, len);
+ uint64_t num = buf_num_pages(buf, len);
int idx = list[i].pgidx;
if (map->attr & FASTRPC_ATTR_NOVA) {
@@ -1213,9 +1219,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
for (oix = 0; oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
- ssize_t mlen;
+ size_t mlen;
uint64_t buf;
- ssize_t len = lpra[i].buf.len;
+ size_t len = lpra[i].buf.len;
+
if (!len)
continue;
if (map)
@@ -1306,7 +1313,7 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
goto bail;
} else {
fastrpc_mmap_free(ctx->maps[i]);
- ctx->maps[i] = 0;
+ ctx->maps[i] = NULL;
}
}
size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
@@ -1422,7 +1429,7 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
int err = 0, len;
- VERIFY(err, 0 != channel_ctx->chan);
+ VERIFY(err, NULL != channel_ctx->chan);
if (err)
goto bail;
msg->pid = current->tgid;
@@ -1521,17 +1528,17 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
uint32_t kernel,
struct fastrpc_ioctl_invoke_attrs *inv)
{
- struct smq_invoke_ctx *ctx = 0;
+ struct smq_invoke_ctx *ctx = NULL;
struct fastrpc_ioctl_invoke *invoke = &inv->inv;
int cid = fl->cid;
int interrupted = 0;
int err = 0;
- struct timespec invoket;
+ struct timespec invoket = {0};
if (fl->profile)
getnstimeofday(&invoket);
- VERIFY(err, fl->sctx);
+ VERIFY(err, fl->sctx != NULL);
if (err)
goto bail;
VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
@@ -1624,14 +1631,14 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
struct fastrpc_ioctl_invoke_attrs ioctl;
struct fastrpc_ioctl_init *init = &uproc->init;
struct smq_phy_page pages[1];
- struct fastrpc_mmap *file = 0, *mem = 0;
+ struct fastrpc_mmap *file = NULL, *mem = NULL;
char *proc_name = NULL;
int srcVM[1] = {VMID_HLOS};
int destVM[1] = {gcinfo[0].heap_vmid};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
- VERIFY(err, !fastrpc_channel_open(fl));
+ VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
if (err)
goto bail;
if (init->flags == FASTRPC_INIT_ATTACH) {
@@ -1642,8 +1649,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ioctl.inv.handle = 1;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
ioctl.inv.pra = ra;
- ioctl.fds = 0;
- ioctl.attrs = 0;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
fl->pd = 0;
VERIFY(err, !(err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
@@ -1655,9 +1662,9 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
int mflags = 0;
struct {
int pgid;
- int namelen;
- int filelen;
- int pageslen;
+ unsigned int namelen;
+ unsigned int filelen;
+ unsigned int pageslen;
int attrs;
int siglen;
} inbuf;
@@ -1718,7 +1725,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
ioctl.inv.pra = ra;
ioctl.fds = fds;
- ioctl.attrs = 0;
+ ioctl.attrs = NULL;
VERIFY(err, !(err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
if (err)
@@ -1726,12 +1733,12 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
} else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
remote_arg_t ra[3];
uint64_t phys = 0;
- ssize_t size = 0;
+ size_t size = 0;
int fds[3];
struct {
int pgid;
- int namelen;
- int pageslen;
+ unsigned int namelen;
+ unsigned int pageslen;
} inbuf;
if (!init->filelen)
@@ -1785,8 +1792,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
ioctl.inv.pra = ra;
- ioctl.fds = 0;
- ioctl.attrs = 0;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
VERIFY(err, !(err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
if (err)
@@ -1819,7 +1826,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
if (err)
goto bail;
- VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
+ VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
if (err)
goto bail;
tgid = fl->tgid;
@@ -1828,8 +1835,8 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
ioctl.inv.handle = 1;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
ioctl.inv.pra = ra;
- ioctl.fds = 0;
- ioctl.attrs = 0;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
@@ -1874,8 +1881,8 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
else
ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
ioctl.inv.pra = ra;
- ioctl.fds = 0;
- ioctl.attrs = 0;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
map->raddr = (uintptr_t)routargs.vaddrout;
@@ -1928,8 +1935,8 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
ioctl.inv.handle = 1;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
ioctl.inv.pra = ra;
- ioctl.fds = 0;
- ioctl.attrs = 0;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
if (fl == NULL)
goto bail;
@@ -1964,7 +1971,7 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
struct {
int pid;
uintptr_t vaddrout;
- ssize_t size;
+ size_t size;
} inargs;
inargs.pid = current->tgid;
@@ -1979,8 +1986,8 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
else
ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
ioctl.inv.pra = ra;
- ioctl.fds = 0;
- ioctl.attrs = 0;
+ ioctl.fds = NULL;
+ ioctl.attrs = NULL;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
if (err)
@@ -1997,13 +2004,14 @@ bail:
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
{
- struct fastrpc_mmap *match = 0, *map = NULL;
+ struct fastrpc_mmap *match = NULL, *map = NULL;
struct hlist_node *n = NULL;
int err = 0, ret = 0;
struct fastrpc_apps *me = &gfa;
struct ramdump_segment *ramdump_segments_rh = NULL;
+
do {
- match = 0;
+ match = NULL;
spin_lock(&me->hlock);
hlist_for_each_entry_safe(map, n, &me->maps, hn) {
match = map;
@@ -2041,7 +2049,7 @@ bail:
}
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
- ssize_t len, struct fastrpc_mmap **ppmap);
+ size_t len, struct fastrpc_mmap **ppmap);
static void fastrpc_mmap_add(struct fastrpc_mmap *map);
@@ -2049,7 +2057,7 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
struct fastrpc_ioctl_munmap *ud)
{
int err = 0;
- struct fastrpc_mmap *map = 0;
+ struct fastrpc_mmap *map = NULL;
VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
if (err)
@@ -2068,7 +2076,7 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
struct fastrpc_ioctl_mmap *ud)
{
- struct fastrpc_mmap *map = 0;
+ struct fastrpc_mmap *map = NULL;
int err = 0;
if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
ud->flags, &map))
@@ -2101,7 +2109,7 @@ static void fastrpc_channel_close(struct kref *kref)
else
fastrpc_glink_close(ctx->chan, cid);
- ctx->chan = 0;
+ ctx->chan = NULL;
mutex_unlock(&me->smd_mutex);
pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
@@ -2140,19 +2148,20 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
return err;
}
-bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
+static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
+ size_t size)
{
if (glink_queue_rx_intent(h, NULL, size))
return false;
return true;
}
-void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
+static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
const void *pkt_priv, const void *ptr)
{
}
-void fastrpc_glink_notify_rx(void *handle, const void *priv,
+static void fastrpc_glink_notify_rx(void *handle, const void *priv,
const void *pkt_priv, const void *ptr, size_t size)
{
struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
@@ -2167,7 +2176,8 @@ void fastrpc_glink_notify_rx(void *handle, const void *priv,
glink_rx_done(handle, ptr, true);
}
-void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event)
+static void fastrpc_glink_notify_state(void *handle, const void *priv,
+ unsigned int event)
{
struct fastrpc_apps *me = &gfa;
int cid = (int)(uintptr_t)priv;
@@ -2221,7 +2231,7 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
static int fastrpc_file_free(struct fastrpc_file *fl)
{
struct hlist_node *n;
- struct fastrpc_mmap *map = 0;
+ struct fastrpc_mmap *map = NULL;
int cid;
if (!fl)
@@ -2264,7 +2274,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
if (fl->debugfs_file != NULL)
debugfs_remove(fl->debugfs_file);
fastrpc_file_free(fl);
- file->private_data = 0;
+ file->private_data = NULL;
}
return 0;
}
@@ -2388,9 +2398,9 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
{
struct fastrpc_file *fl = filp->private_data;
struct hlist_node *n;
- struct fastrpc_buf *buf = 0;
- struct fastrpc_mmap *map = 0;
- struct smq_invoke_ctx *ictx = 0;
+ struct fastrpc_buf *buf = NULL;
+ struct fastrpc_mmap *map = NULL;
+ struct smq_invoke_ctx *ictx = NULL;
struct fastrpc_channel_ctx *chan;
struct fastrpc_session_ctx *sess;
unsigned int len = 0;
@@ -2514,7 +2524,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
}
fl->ssrcount = me->channel[cid].ssrcount;
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
- (me->channel[cid].chan == 0)) {
+ (me->channel[cid].chan == NULL)) {
if (me->glink) {
VERIFY(err, 0 == fastrpc_glink_register(cid, me));
if (err)
@@ -2534,7 +2544,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
wait_for_completion_timeout(&me->channel[cid].workport,
RPC_TIMEOUT));
if (err) {
- me->channel[cid].chan = 0;
+ me->channel[cid].chan = NULL;
goto bail;
}
kref_init(&me->channel[cid].kref);
@@ -2568,10 +2578,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
int err = 0;
struct dentry *debugfs_file;
- struct fastrpc_file *fl = 0;
+ struct fastrpc_file *fl = NULL;
struct fastrpc_apps *me = &gfa;
- VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
+ VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
if (err)
return err;
debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
@@ -2600,7 +2610,7 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
int err = 0;
uint32_t cid;
- VERIFY(err, fl != 0);
+ VERIFY(err, fl != NULL);
if (err)
goto bail;
if (fl->cid == -1) {
@@ -2636,8 +2646,8 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
int size = 0, err = 0;
uint32_t info;
- p.inv.fds = 0;
- p.inv.attrs = 0;
+ p.inv.fds = NULL;
+ p.inv.attrs = NULL;
spin_lock(&fl->hlock);
if (fl->file_close == 1) {
err = EBADF;
@@ -2657,7 +2667,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
case FASTRPC_IOCTL_INVOKE_ATTRS:
if (!size)
size = sizeof(struct fastrpc_ioctl_invoke_attrs);
- VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
+ K_COPY_FROM_USER(err, 0, &p.inv, param, size);
if (err)
goto bail;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
@@ -2666,20 +2676,20 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
goto bail;
break;
case FASTRPC_IOCTL_MMAP:
- VERIFY(err, 0 == copy_from_user(&p.mmap, param,
- sizeof(p.mmap)));
+ K_COPY_FROM_USER(err, 0, &p.mmap, param,
+ sizeof(p.mmap));
if (err)
goto bail;
VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
if (err)
goto bail;
- VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
+ K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
if (err)
goto bail;
break;
case FASTRPC_IOCTL_MUNMAP:
- VERIFY(err, 0 == copy_from_user(&p.munmap, param,
- sizeof(p.munmap)));
+ K_COPY_FROM_USER(err, 0, &p.munmap, param,
+ sizeof(p.munmap));
if (err)
goto bail;
VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
@@ -2702,35 +2712,35 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
}
break;
case FASTRPC_IOCTL_GETPERF:
- VERIFY(err, 0 == copy_from_user(&p.perf,
- param, sizeof(p.perf)));
+ K_COPY_FROM_USER(err, 0, &p.perf,
+ param, sizeof(p.perf));
if (err)
goto bail;
p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
if (p.perf.keys) {
char *keys = PERF_KEYS;
- VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
- keys, strlen(keys)+1));
+ K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
+ keys, strlen(keys)+1);
if (err)
goto bail;
}
if (p.perf.data) {
- VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
- &fl->perf, sizeof(fl->perf)));
+ K_COPY_TO_USER(err, 0, (void *)p.perf.data,
+ &fl->perf, sizeof(fl->perf));
}
- VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
+ K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
if (err)
goto bail;
break;
case FASTRPC_IOCTL_GETINFO:
- VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
+ K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
if (err)
goto bail;
VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
if (err)
goto bail;
- VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
+ K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
if (err)
goto bail;
break;
@@ -2742,7 +2752,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
case FASTRPC_IOCTL_INIT_ATTRS:
if (!size)
size = sizeof(struct fastrpc_ioctl_init_attrs);
- VERIFY(err, 0 == copy_from_user(&p.init, param, size));
+ K_COPY_FROM_USER(err, 0, &p.init, param, size);
if (err)
goto bail;
VERIFY(err, p.init.init.filelen >= 0 &&
@@ -2844,7 +2854,8 @@ static int fastrpc_cb_probe(struct device *dev)
int err = 0, i;
int secure_vmid = VMID_CP_PIXEL;
- VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
+ VERIFY(err, NULL != (name = of_get_property(dev->of_node,
+ "label", NULL)));
if (err)
goto bail;
for (i = 0; i < NUM_CHANNELS; i++) {
@@ -2906,8 +2917,8 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
struct fastrpc_channel_ctx *chan;
struct fastrpc_session_ctx *first_sess, *sess;
const char *name;
- unsigned int *range = 0, range_size = 0;
- unsigned int *sids = 0, sids_size = 0;
+ unsigned int *range = NULL, range_size = 0;
+ unsigned int *sids = NULL, sids_size = 0;
int err = 0, ret = 0, i;
VERIFY(err, 0 != (domains_child_node = of_get_child_by_name(
@@ -3034,17 +3045,17 @@ static void fastrpc_deinit(void)
if (chan->chan) {
kref_put_mutex(&chan->kref,
fastrpc_channel_close, &me->smd_mutex);
- chan->chan = 0;
+ chan->chan = NULL;
}
for (j = 0; j < NUM_SESSIONS; j++) {
struct fastrpc_session_ctx *sess = &chan->session[j];
if (sess->smmu.dev) {
arm_iommu_detach_device(sess->smmu.dev);
- sess->smmu.dev = 0;
+ sess->smmu.dev = NULL;
}
if (sess->smmu.mapping) {
arm_iommu_release_mapping(sess->smmu.mapping);
- sess->smmu.mapping = 0;
+ sess->smmu.mapping = NULL;
}
}
}
@@ -3062,7 +3073,7 @@ static struct platform_driver fastrpc_driver = {
static int __init fastrpc_device_init(void)
{
struct fastrpc_apps *me = &gfa;
- struct device *dev = 0;
+ struct device *dev = NULL;
int err = 0, i;
memset(me, 0, sizeof(*me));
@@ -3099,7 +3110,7 @@ static int __init fastrpc_device_init(void)
me->channel[i].prevssrcount = 0;
me->channel[i].issubsystemup = 1;
me->channel[i].ramdumpenabled = 0;
- me->channel[i].remoteheap_ramdump_dev = 0;
+ me->channel[i].remoteheap_ramdump_dev = NULL;
me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
me->channel[i].handle = subsys_notif_register_notifier(
gcinfo[i].subsys,
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index fcd6d1142618..fc6450336061 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -39,7 +39,7 @@
struct compat_remote_buf {
compat_uptr_t pv; /* buffer pointer */
- compat_ssize_t len; /* length of buffer */
+ compat_size_t len; /* length of buffer */
};
union compat_remote_arg {
@@ -68,13 +68,13 @@ struct compat_fastrpc_ioctl_mmap {
compat_int_t fd; /* ion fd */
compat_uint_t flags; /* flags for dsp to map with */
compat_uptr_t vaddrin; /* optional virtual address */
- compat_ssize_t size; /* size */
+ compat_size_t size; /* size */
compat_uptr_t vaddrout; /* dsps virtual address */
};
struct compat_fastrpc_ioctl_munmap {
compat_uptr_t vaddrout; /* address to unmap */
- compat_ssize_t size; /* size */
+ compat_size_t size; /* size */
};
struct compat_fastrpc_ioctl_init {
@@ -105,7 +105,7 @@ static int compat_get_fastrpc_ioctl_invoke(
unsigned int cmd)
{
compat_uint_t u, sc;
- compat_ssize_t s;
+ compat_size_t s;
compat_uptr_t p;
struct fastrpc_ioctl_invoke_attrs *inv;
union compat_remote_arg *pra32;
@@ -193,7 +193,7 @@ static int compat_get_fastrpc_ioctl_mmap(
{
compat_uint_t u;
compat_int_t i;
- compat_ssize_t s;
+ compat_size_t s;
compat_uptr_t p;
int err;
@@ -227,7 +227,7 @@ static int compat_get_fastrpc_ioctl_munmap(
struct fastrpc_ioctl_munmap __user *unmap)
{
compat_uptr_t p;
- compat_ssize_t s;
+ compat_size_t s;
int err;
err = get_user(p, &unmap32->vaddrout);
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 2a66b11bf179..be8d1a536d6c 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -113,7 +113,7 @@ do {\
struct remote_buf64 {
uint64_t pv;
- int64_t len;
+ uint64_t len;
};
union remote_arg64 {
@@ -125,7 +125,7 @@ union remote_arg64 {
struct remote_buf {
void *pv; /* buffer pointer */
- ssize_t len; /* length of buffer */
+ size_t len; /* length of buffer */
};
union remote_arg {
@@ -152,37 +152,37 @@ struct fastrpc_ioctl_invoke_attrs {
struct fastrpc_ioctl_init {
uint32_t flags; /* one of FASTRPC_INIT_* macros */
- uintptr_t __user file; /* pointer to elf file */
- int32_t filelen; /* elf file length */
+ uintptr_t file; /* pointer to elf file */
+ uint32_t filelen; /* elf file length */
int32_t filefd; /* ION fd for the file */
- uintptr_t __user mem; /* mem for the PD */
- int32_t memlen; /* mem length */
+ uintptr_t mem; /* mem for the PD */
+ uint32_t memlen; /* mem length */
int32_t memfd; /* ION fd for the mem */
};
struct fastrpc_ioctl_init_attrs {
struct fastrpc_ioctl_init init;
int attrs;
- int siglen;
+ unsigned int siglen;
};
struct fastrpc_ioctl_munmap {
uintptr_t vaddrout; /* address to unmap */
- ssize_t size; /* size */
+ size_t size; /* size */
};
struct fastrpc_ioctl_mmap {
int fd; /* ion fd */
uint32_t flags; /* flags for dsp to map with */
- uintptr_t __user *vaddrin; /* optional virtual address */
- ssize_t size; /* size */
+ uintptr_t vaddrin; /* optional virtual address */
+ size_t size; /* size */
uintptr_t vaddrout; /* dsps virtual address */
};
struct fastrpc_ioctl_perf { /* kernel performance data */
- uintptr_t __user data;
+ uintptr_t data;
uint32_t numkeys;
- uintptr_t __user keys;
+ uintptr_t keys;
};
struct smq_null_invoke {
@@ -220,14 +220,15 @@ struct smq_invoke_rsp {
static inline struct smq_invoke_buf *smq_invoke_buf_start(remote_arg64_t *pra,
uint32_t sc)
{
- int len = REMOTE_SCALARS_LENGTH(sc);
+ unsigned int len = REMOTE_SCALARS_LENGTH(sc);
+
return (struct smq_invoke_buf *)(&pra[len]);
}
static inline struct smq_phy_page *smq_phy_page_start(uint32_t sc,
struct smq_invoke_buf *buf)
{
- int nTotal = REMOTE_SCALARS_INBUFS(sc) + REMOTE_SCALARS_OUTBUFS(sc);
+ uint64_t nTotal = REMOTE_SCALARS_INBUFS(sc)+REMOTE_SCALARS_OUTBUFS(sc);
return (struct smq_phy_page *)(&buf[nTotal]);
}
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index b7dff47623de..7e428ce972a8 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -911,6 +911,7 @@ void diagfwd_peripheral_exit(void)
uint8_t peripheral;
uint8_t type;
struct diagfwd_info *fwd_info = NULL;
+ int transport = 0;
diag_smd_exit();
diag_socket_exit();
@@ -933,7 +934,10 @@ void diagfwd_peripheral_exit(void)
driver->diagfwd_dci_cmd[peripheral] = NULL;
}
- kfree(early_init_info);
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ kfree(early_init_info[transport]);
+ early_init_info[transport] = NULL;
+ }
}
int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 25372dc381d4..5cb5e8ff0224 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4029,7 +4029,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
}
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
- struct list_head *timeouts, long timeout_period,
+ struct list_head *timeouts,
+ unsigned long timeout_period,
int slot, unsigned long *flags,
unsigned int *waiting_msgs)
{
@@ -4042,8 +4043,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
if (!ent->inuse)
return;
- ent->timeout -= timeout_period;
- if (ent->timeout > 0) {
+ if (timeout_period < ent->timeout) {
+ ent->timeout -= timeout_period;
(*waiting_msgs)++;
return;
}
@@ -4109,7 +4110,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
}
}
-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
+ unsigned long timeout_period)
{
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
diff --git a/drivers/clk/msm/Kconfig b/drivers/clk/msm/Kconfig
index 3829f6aec124..a7501f5d446f 100644
--- a/drivers/clk/msm/Kconfig
+++ b/drivers/clk/msm/Kconfig
@@ -16,4 +16,15 @@ config MSM_CLK_CONTROLLER_V2
Generate clock data structures from definitions found in
device tree.
+config MSM_VIRTCLK_FRONTEND
+ bool
+
+config MSM_VIRTCLK_FRONTEND_8996
+ tristate "QTI msm8996 virtual clock frontend driver"
+ depends on COMMON_CLK_MSM && MSM_HAB
+ select MSM_VIRTCLK_FRONTEND
+ ---help---
+ This is the virtual clock frontend driver for the QTI msm8996
+ virtual platform.
+
source "drivers/clk/msm/mdss/Kconfig"
diff --git a/drivers/clk/msm/Makefile b/drivers/clk/msm/Makefile
index 27e07eb12205..5f50890704da 100644
--- a/drivers/clk/msm/Makefile
+++ b/drivers/clk/msm/Makefile
@@ -29,3 +29,6 @@ endif
obj-$(CONFIG_COMMON_CLK_MSM) += gdsc.o
obj-$(CONFIG_COMMON_CLK_MSM) += mdss/
+
+obj-$(CONFIG_MSM_VIRTCLK_FRONTEND) += virtclk-front.o
+obj-$(CONFIG_MSM_VIRTCLK_FRONTEND_8996) += virtclk-front-8996.o
diff --git a/drivers/clk/msm/virtclk-front-8996.c b/drivers/clk/msm/virtclk-front-8996.c
new file mode 100644
index 000000000000..2e978cd3a456
--- /dev/null
+++ b/drivers/clk/msm/virtclk-front-8996.c
@@ -0,0 +1,551 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+static struct virtclk_front gcc_blsp1_ahb_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_ahb_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_ahb_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup1_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup1_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_uart1_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_uart1_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup2_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup2_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_uart2_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_uart2_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup3_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup3_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_uart3_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_uart3_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup4_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup4_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_uart4_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_uart4_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_uart4_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup5_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup5_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_uart5_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_uart5_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_uart5_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup6_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_qup6_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp1_uart6_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp1_uart6_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp1_uart6_apps_clk.c),
+ },
+};
+
+
+static struct virtclk_front gcc_blsp2_ahb_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_ahb_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_ahb_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup1_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup1_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_uart1_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_uart1_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_uart1_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup2_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup2_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_uart2_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_uart2_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_uart2_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup3_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup3_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup3_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_uart3_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_uart3_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_uart3_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup4_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup4_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup4_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_uart4_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_uart4_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_uart4_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup5_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup5_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup5_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup5_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup5_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_uart5_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_uart5_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_uart5_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup6_spi_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup6_spi_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup6_spi_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_qup6_i2c_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_qup6_i2c_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_blsp2_uart6_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_blsp2_uart6_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_blsp2_uart6_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_sdcc2_ahb_clk = {
+ .c = {
+ .dbg_name = "gcc_sdcc2_ahb_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_sdcc2_ahb_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_sdcc2_apps_clk = {
+ .c = {
+ .dbg_name = "gcc_sdcc2_apps_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_sdcc2_apps_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_usb3_phy_pipe_clk = {
+ .c = {
+ .dbg_name = "gcc_usb3_phy_pipe_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_usb3_phy_pipe_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_usb3_phy_aux_clk = {
+ .c = {
+ .dbg_name = "gcc_usb3_phy_aux_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_usb3_phy_aux_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_usb30_mock_utmi_clk = {
+ .c = {
+ .dbg_name = "gcc_usb30_mock_utmi_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_usb30_mock_utmi_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_aggre2_usb3_axi_clk = {
+ .c = {
+ .dbg_name = "gcc_aggre2_usb3_axi_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_aggre2_usb3_axi_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_sys_noc_usb3_axi_clk = {
+ .c = {
+ .dbg_name = "gcc_sys_noc_usb3_axi_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_sys_noc_usb3_axi_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_usb30_master_clk = {
+ .c = {
+ .dbg_name = "gcc_usb30_master_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_usb30_master_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_usb30_sleep_clk = {
+ .c = {
+ .dbg_name = "gcc_usb30_sleep_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_usb30_sleep_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_usb_phy_cfg_ahb2phy_clk = {
+ .c = {
+ .dbg_name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_usb_phy_cfg_ahb2phy_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_usb3_clkref_clk = {
+ .c = {
+ .dbg_name = "gcc_usb3_clkref_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_usb3_clkref_clk.c),
+ },
+};
+
+static struct virtclk_front hlos1_vote_lpass_adsp_smmu_clk = {
+ .c = {
+ .dbg_name = "gcc_lpass_adsp_smmu_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(hlos1_vote_lpass_adsp_smmu_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_mss_cfg_ahb_clk = {
+ .c = {
+ .dbg_name = "gcc_mss_cfg_ahb_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_mss_cfg_ahb_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_mss_q6_bimc_axi_clk = {
+ .c = {
+ .dbg_name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_mss_q6_bimc_axi_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_boot_rom_ahb_clk = {
+ .c = {
+ .dbg_name = "gcc_boot_rom_ahb_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_boot_rom_ahb_clk.c),
+ },
+};
+
+static struct virtclk_front gpll0_out_msscc = {
+ .c = {
+ .dbg_name = "gcc_mss_gpll0_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gpll0_out_msscc.c),
+ },
+};
+
+static struct virtclk_front gcc_mss_snoc_axi_clk = {
+ .c = {
+ .dbg_name = "gcc_mss_snoc_axi_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_mss_snoc_axi_clk.c),
+ },
+};
+
+static struct virtclk_front gcc_mss_mnoc_bimc_axi_clk = {
+ .c = {
+ .dbg_name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(gcc_mss_mnoc_bimc_axi_clk.c),
+ },
+};
+
+static struct clk_lookup msm_clocks_8996[] = {
+ CLK_LIST(gcc_blsp1_ahb_clk),
+ CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup1_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_uart1_apps_clk),
+ CLK_LIST(gcc_blsp1_qup2_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup2_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_uart2_apps_clk),
+ CLK_LIST(gcc_blsp1_qup3_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup3_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_uart3_apps_clk),
+ CLK_LIST(gcc_blsp1_qup4_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup4_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_uart4_apps_clk),
+ CLK_LIST(gcc_blsp1_qup5_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup5_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_uart5_apps_clk),
+ CLK_LIST(gcc_blsp1_qup6_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup6_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_uart6_apps_clk),
+ CLK_LIST(gcc_blsp2_ahb_clk),
+ CLK_LIST(gcc_blsp2_qup1_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup1_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_uart1_apps_clk),
+ CLK_LIST(gcc_blsp2_qup2_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup2_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_uart2_apps_clk),
+ CLK_LIST(gcc_blsp2_qup3_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup3_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_uart3_apps_clk),
+ CLK_LIST(gcc_blsp2_qup4_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup4_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_uart4_apps_clk),
+ CLK_LIST(gcc_blsp2_qup5_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup5_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_uart5_apps_clk),
+ CLK_LIST(gcc_blsp2_qup6_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup6_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_uart6_apps_clk),
+ CLK_LIST(gcc_sdcc2_ahb_clk),
+ CLK_LIST(gcc_sdcc2_apps_clk),
+ CLK_LIST(gcc_usb3_phy_pipe_clk),
+ CLK_LIST(gcc_usb3_phy_aux_clk),
+ CLK_LIST(gcc_usb30_mock_utmi_clk),
+ CLK_LIST(gcc_aggre2_usb3_axi_clk),
+ CLK_LIST(gcc_sys_noc_usb3_axi_clk),
+ CLK_LIST(gcc_usb30_master_clk),
+ CLK_LIST(gcc_usb30_sleep_clk),
+ CLK_LIST(gcc_usb_phy_cfg_ahb2phy_clk),
+ CLK_LIST(gcc_usb3_clkref_clk),
+ CLK_LIST(hlos1_vote_lpass_adsp_smmu_clk),
+ CLK_LIST(gcc_mss_cfg_ahb_clk),
+ CLK_LIST(gcc_mss_q6_bimc_axi_clk),
+ CLK_LIST(gcc_boot_rom_ahb_clk),
+ CLK_LIST(gpll0_out_msscc),
+ CLK_LIST(gcc_mss_snoc_axi_clk),
+ CLK_LIST(gcc_mss_mnoc_bimc_axi_clk),
+};
+
+static const struct of_device_id msm8996_virtclk_front_match_table[] = {
+ { .compatible = "qcom,virtclk-frontend-8996" },
+ {}
+};
+
+static int msm8996_virtclk_front_probe(struct platform_device *pdev)
+{
+ return msm_virtclk_front_probe(pdev, msm_clocks_8996,
+ ARRAY_SIZE(msm_clocks_8996));
+}
+
+static struct platform_driver msm8996_virtclk_front_driver = {
+ .probe = msm8996_virtclk_front_probe,
+ .driver = {
+ .name = "virtclk-front-8996",
+ .of_match_table = msm8996_virtclk_front_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Register the msm8996 virtclk frontend; static since it is only
+ * referenced by the arch_initcall() below (sparse: "should it be static?").
+ */
+static int __init msm8996_virtclk_front_init(void)
+{
+	return platform_driver_register(&msm8996_virtclk_front_driver);
+}
diff --git a/drivers/clk/msm/virtclk-front.c b/drivers/clk/msm/virtclk-front.c
new file mode 100644
index 000000000000..08c7e5aaa7f4
--- /dev/null
+++ b/drivers/clk/msm/virtclk-front.c
@@ -0,0 +1,460 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/habmm.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/*
+ * Connection state shared by all frontend clocks.
+ * @handle: habmm socket handle to the backend (0 = not yet opened)
+ * @lock:   serializes each send/recv transaction on @handle
+ */
+struct virtclk_front_data {
+	int handle;
+	struct rt_mutex lock;
+};
+
+/*
+ * Command codes of the frontend/backend wire protocol.
+ * The numeric values are shared with the backend VM — do not reorder.
+ */
+enum virtclk_cmd {
+	CLK_MSG_GETID = 1,
+	CLK_MSG_ENABLE,
+	CLK_MSG_DISABLE,
+	CLK_MSG_RESET,
+	CLK_MSG_SETFREQ,
+	CLK_MSG_GETFREQ,
+	CLK_MSG_MAX
+};
+
+/*
+ * Wire-format messages exchanged over the habmm socket. All structs are
+ * __packed because they are serialized as-is; @len carries the total
+ * message size and @clk_id the backend-assigned clock identifier.
+ */
+struct clk_msg_header {
+	u32 cmd;
+	u32 len;
+	u32 clk_id;
+} __packed;
+
+/* Generic response: @rsp is 0 on success, non-zero backend error otherwise. */
+struct clk_msg_rsp {
+	struct clk_msg_header header;
+	u32 rsp;
+} __packed;
+
+/* CLK_MSG_SETFREQ request: @freq is the requested rate in Hz. */
+struct clk_msg_setfreq {
+	struct clk_msg_header header;
+	u32 freq;
+} __packed;
+
+/* CLK_MSG_GETID request: @name is the clock's dbg_name, NUL-terminated. */
+struct clk_msg_getid {
+	struct clk_msg_header header;
+	char name[32];
+} __packed;
+
+/* CLK_MSG_GETFREQ response: @freq is the current rate in Hz. */
+struct clk_msg_getfreq {
+	struct clk_msg_rsp rsp;
+	u32 freq;
+} __packed;
+
+/* Single shared connection; zero-initialized, so .handle starts "closed". */
+static struct virtclk_front_data virtclk_front_ctx;
+
+/*
+ * Map a struct clk back to its containing struct virtclk_front
+ * (declared elsewhere, presumably in msm-clock-controller.h — its
+ * 'c' member is the embedded struct clk).
+ */
+static inline struct virtclk_front *to_virtclk_front(struct clk *clk)
+{
+	return container_of(clk, struct virtclk_front, c);
+}
+
+/*
+ * Lazily open the habmm socket to the clock backend.
+ *
+ * Idempotent: if a handle is already cached the call is a no-op.
+ * Returns 0 on success (or already open), otherwise the habmm error.
+ */
+static int virtclk_front_init_iface(void)
+{
+	int ret = 0;
+	int handle;
+
+	rt_mutex_lock(&virtclk_front_ctx.lock);
+
+	/* Non-zero handle means the socket was opened by an earlier call. */
+	if (virtclk_front_ctx.handle)
+		goto out;
+
+	ret = habmm_socket_open(&handle, MM_CLK_VM1, 0, 0);
+	if (ret) {
+		pr_err("open habmm socket failed (%d)\n", ret);
+		goto out;
+	}
+
+	virtclk_front_ctx.handle = handle;
+
+out:
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+}
+
+/*
+ * Resolve and cache the backend id for @clk.
+ *
+ * Sends a CLK_MSG_GETID request carrying the clock's dbg_name and caches
+ * the id from the response in v->id (0 means "not yet resolved", so the
+ * lookup runs at most once per clock).
+ *
+ * On a transport failure the socket is closed and the cached handle
+ * cleared, so the next operation re-opens it via virtclk_front_init_iface().
+ *
+ * Return: 0 on success, -EIO on a backend error response, or the habmm
+ * transport error code.
+ */
+static int virtclk_front_get_id(struct clk *clk)
+{
+	struct virtclk_front *v = to_virtclk_front(clk);
+	struct clk_msg_getid msg;
+	struct clk_msg_rsp rsp;
+	u32 rsp_size = sizeof(rsp);
+	int handle;
+	int ret = 0;
+
+	if (v->id)
+		return ret;
+
+	/*
+	 * Zero the whole request first: header.clk_id and the tail of
+	 * name[] were previously sent uninitialized, leaking stack data
+	 * to the backend VM.
+	 */
+	memset(&msg, 0, sizeof(msg));
+	msg.header.cmd = CLK_MSG_GETID;
+	msg.header.len = sizeof(msg);
+	strlcpy(msg.name, clk->dbg_name, sizeof(msg.name));
+
+	rt_mutex_lock(&virtclk_front_ctx.lock);
+
+	handle = virtclk_front_ctx.handle;
+	ret = habmm_socket_send(handle, &msg, sizeof(msg), 0);
+	if (ret) {
+		pr_err("%s: habmm socket send failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	ret = habmm_socket_recv(handle, &rsp, &rsp_size,
+		UINT_MAX, 0);
+	if (ret) {
+		pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	if (rsp.rsp) {
+		pr_err("%s: error response (%d)\n", clk->dbg_name, rsp.rsp);
+		ret = -EIO;
+	} else
+		v->id = rsp.header.clk_id;
+
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+
+	return ret;
+
+err_out:
+	/* Drop the broken socket; it will be reopened on the next request. */
+	habmm_socket_close(handle);
+	virtclk_front_ctx.handle = 0;
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+}
+
+/*
+ * clk_ops .prepare: ask the backend to enable the clock
+ * (CLK_MSG_ENABLE over the habmm socket).
+ *
+ * Returns 0 on success, -EIO on a backend error response, or the
+ * habmm transport error. On a transport failure the socket is closed
+ * so a later call can reopen it.
+ */
+static int virtclk_front_prepare(struct clk *clk)
+{
+	struct virtclk_front *v = to_virtclk_front(clk);
+	struct clk_msg_header msg;
+	struct clk_msg_rsp rsp;
+	u32 rsp_size = sizeof(rsp);
+	int handle;
+	int ret = 0;
+
+	/* Make sure the socket is open and the backend id is resolved. */
+	ret = virtclk_front_init_iface();
+	if (ret)
+		return ret;
+
+	ret = virtclk_front_get_id(clk);
+	if (ret)
+		return ret;
+
+	msg.clk_id = v->id;
+	msg.cmd = CLK_MSG_ENABLE;
+	msg.len = sizeof(struct clk_msg_header);
+
+	/* One request/response transaction at a time on the shared socket. */
+	rt_mutex_lock(&virtclk_front_ctx.lock);
+
+	handle = virtclk_front_ctx.handle;
+	ret = habmm_socket_send(handle, &msg, sizeof(msg), 0);
+	if (ret) {
+		pr_err("%s: habmm socket send failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+	if (ret) {
+		pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	if (rsp.rsp) {
+		pr_err("%s: error response (%d)\n", clk->dbg_name, rsp.rsp);
+		ret = -EIO;
+	}
+
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+
+err_out:
+	/* Drop the broken socket; it will be reopened on the next request. */
+	habmm_socket_close(handle);
+	virtclk_front_ctx.handle = 0;
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+}
+
+/*
+ * clk_ops .unprepare: ask the backend to disable the clock
+ * (CLK_MSG_DISABLE). The callback returns void, so failures are only
+ * logged; a transport failure closes the socket for later reopen.
+ */
+static void virtclk_front_unprepare(struct clk *clk)
+{
+	struct virtclk_front *v = to_virtclk_front(clk);
+	struct clk_msg_header msg;
+	struct clk_msg_rsp rsp;
+	u32 rsp_size = sizeof(rsp);
+	int handle;
+	int ret = 0;
+
+	ret = virtclk_front_init_iface();
+	if (ret)
+		return;
+
+	ret = virtclk_front_get_id(clk);
+	if (ret)
+		return;
+
+	msg.clk_id = v->id;
+	msg.cmd = CLK_MSG_DISABLE;
+	msg.len = sizeof(struct clk_msg_header);
+
+	rt_mutex_lock(&virtclk_front_ctx.lock);
+
+	handle = virtclk_front_ctx.handle;
+	ret = habmm_socket_send(handle, &msg, sizeof(msg), 0);
+	if (ret) {
+		pr_err("%s: habmm socket send failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+	if (ret) {
+		pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	/* Backend refused the disable; nothing we can do but log it. */
+	if (rsp.rsp)
+		pr_err("%s: error response (%d)\n", clk->dbg_name, rsp.rsp);
+
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return;
+
+err_out:
+	habmm_socket_close(handle);
+	virtclk_front_ctx.handle = 0;
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+}
+
+/*
+ * clk_ops .reset: forward a reset request (CLK_MSG_RESET) to the backend.
+ *
+ * NOTE(review): @action (assert/deassert) is not transmitted — the backend
+ * presumably performs a full reset pulse regardless; confirm against the
+ * backend implementation.
+ *
+ * Returns 0 on success, -EIO on a backend error response, or the habmm
+ * transport error.
+ */
+static int virtclk_front_reset(struct clk *clk, enum clk_reset_action action)
+{
+	struct virtclk_front *v = to_virtclk_front(clk);
+	struct clk_msg_header msg;
+	struct clk_msg_rsp rsp;
+	u32 rsp_size = sizeof(rsp);
+	int handle;
+	int ret = 0;
+
+	ret = virtclk_front_init_iface();
+	if (ret)
+		return ret;
+
+	ret = virtclk_front_get_id(clk);
+	if (ret)
+		return ret;
+
+	msg.clk_id = v->id;
+	msg.cmd = CLK_MSG_RESET;
+	msg.len = sizeof(struct clk_msg_header);
+
+	rt_mutex_lock(&virtclk_front_ctx.lock);
+
+	handle = virtclk_front_ctx.handle;
+	ret = habmm_socket_send(handle, &msg, sizeof(msg), 0);
+	if (ret) {
+		pr_err("%s: habmm socket send failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+	if (ret) {
+		pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	if (rsp.rsp) {
+		pr_err("%s: error response (%d)\n", clk->dbg_name, rsp.rsp);
+		ret = -EIO;
+	}
+
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+
+err_out:
+	/* Drop the broken socket; it will be reopened on the next request. */
+	habmm_socket_close(handle);
+	virtclk_front_ctx.handle = 0;
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+}
+
+/*
+ * clk_ops .set_rate: send CLK_MSG_SETFREQ with the requested rate.
+ *
+ * NOTE(review): @rate is truncated to u32 on the wire, capping requests
+ * at ~4.29 GHz — acceptable for these clocks, but worth confirming.
+ *
+ * Returns 0 on success, -EIO on a backend error response, or the habmm
+ * transport error.
+ */
+static int virtclk_front_set_rate(struct clk *clk, unsigned long rate)
+{
+	struct virtclk_front *v = to_virtclk_front(clk);
+	struct clk_msg_setfreq msg;
+	struct clk_msg_rsp rsp;
+	u32 rsp_size = sizeof(rsp);
+	int handle;
+	int ret = 0;
+
+	ret = virtclk_front_init_iface();
+	if (ret)
+		return ret;
+
+	ret = virtclk_front_get_id(clk);
+	if (ret)
+		return ret;
+
+	msg.header.clk_id = v->id;
+	msg.header.cmd = CLK_MSG_SETFREQ;
+	msg.header.len = sizeof(msg);
+	msg.freq = (u32)rate;
+
+	rt_mutex_lock(&virtclk_front_ctx.lock);
+
+	handle = virtclk_front_ctx.handle;
+	ret = habmm_socket_send(handle, &msg, sizeof(msg), 0);
+	if (ret) {
+		pr_err("%s: habmm socket send failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+	if (ret) {
+		pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
+			ret);
+		goto err_out;
+	}
+
+	if (rsp.rsp) {
+		pr_err("%s (%luHz): error response (%d)\n", clk->dbg_name,
+			rate, rsp.rsp);
+		ret = -EIO;
+	}
+
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+
+err_out:
+	/* Drop the broken socket; it will be reopened on the next request. */
+	habmm_socket_close(handle);
+	virtclk_front_ctx.handle = 0;
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+}
+
+/* .set_max_rate is a no-op for virtual clocks; limits are owned by the backend. */
+static int virtclk_front_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	return 0;
+}
+
+/*
+ * A virtual clock is reported as enabled while it holds a non-zero
+ * prepare count; the backend's hardware state is not queried.
+ */
+static int virtclk_front_is_enabled(struct clk *clk)
+{
+	struct virtclk_front *vclk = to_virtclk_front(clk);
+
+	return vclk->c.prepare_count ? 1 : 0;
+}
+
+/* .set_flags is a no-op: no clock flags are forwarded to the backend. */
+static int virtclk_front_set_flags(struct clk *clk, unsigned flags)
+{
+	return 0;
+}
+
+/*
+ * clk_ops .get_rate: query the backend for the clock's current rate
+ * (CLK_MSG_GETFREQ).
+ *
+ * Return: the rate in Hz, or 0 on any failure — get_rate has no way to
+ * report an error code, so errors are logged and collapsed to "0 Hz".
+ */
+static unsigned long virtclk_front_get_rate(struct clk *clk)
+{
+	struct virtclk_front *v = to_virtclk_front(clk);
+	struct clk_msg_header msg;
+	struct clk_msg_getfreq rsp;
+	u32 rsp_size = sizeof(rsp);
+	int handle;
+	int ret = 0;
+
+	ret = virtclk_front_init_iface();
+	if (ret)
+		return 0;
+
+	ret = virtclk_front_get_id(clk);
+	if (ret)
+		return 0;
+
+	msg.clk_id = v->id;
+	msg.cmd = CLK_MSG_GETFREQ;
+	msg.len = sizeof(msg);
+
+	rt_mutex_lock(&virtclk_front_ctx.lock);
+
+	handle = virtclk_front_ctx.handle;
+	ret = habmm_socket_send(handle, &msg, sizeof(msg), 0);
+	if (ret) {
+		/* Log the real error code before collapsing it to 0 Hz;
+		 * the original cleared ret first and always logged "(0)".
+		 */
+		pr_err("%s: habmm socket send failed (%d)\n", clk->dbg_name,
+			ret);
+		ret = 0;
+		goto err_out;
+	}
+
+	ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+	if (ret) {
+		pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
+			ret);
+		ret = 0;
+		goto err_out;
+	}
+
+	if (rsp.rsp.rsp) {
+		pr_err("%s: error response (%d)\n", clk->dbg_name, rsp.rsp.rsp);
+		ret = 0;
+	} else
+		ret = rsp.freq;
+
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+
+err_out:
+	/* Drop the broken socket; it will be reopened on the next request. */
+	habmm_socket_close(handle);
+	virtclk_front_ctx.handle = 0;
+	rt_mutex_unlock(&virtclk_front_ctx.lock);
+	return ret;
+}
+
+/* .round_rate: accept any rate verbatim; validation is left to the backend. */
+static long virtclk_front_round_rate(struct clk *clk, unsigned long rate)
+{
+	return rate;
+}
+
+/*
+ * Shared clk_ops for all frontend clocks. Gating is done through
+ * prepare/unprepare only — no .enable/.disable are provided, presumably
+ * because every backend call may sleep on the habmm socket.
+ */
+struct clk_ops virtclk_front_ops = {
+	.prepare = virtclk_front_prepare,
+	.unprepare = virtclk_front_unprepare,
+	.reset = virtclk_front_reset,
+	.set_rate = virtclk_front_set_rate,
+	.set_max_rate = virtclk_front_set_max_rate,
+	.is_enabled = virtclk_front_is_enabled,
+	.set_flags = virtclk_front_set_flags,
+	.get_rate = virtclk_front_get_rate,
+	.round_rate = virtclk_front_round_rate,
+};
+
+/*
+ * Register @table (size @size) with the MSM clock framework on behalf
+ * of the SoC-specific frontend driver @pdev.
+ *
+ * The transaction lock is initialized *before* the clocks are
+ * registered: once of_msm_clock_register() returns, consumers may
+ * immediately invoke the clk_ops above, all of which take the lock —
+ * the original initialized it afterwards, leaving a window where an
+ * uninitialized rt_mutex could be locked.
+ *
+ * Return: 0 on success or the of_msm_clock_register() error code.
+ */
+int msm_virtclk_front_probe(struct platform_device *pdev,
+				struct clk_lookup *table,
+				size_t size)
+{
+	int ret;
+
+	rt_mutex_init(&virtclk_front_ctx.lock);
+
+	ret = of_msm_clock_register(pdev->dev.of_node, table, size);
+	if (ret)
+		return ret;
+
+	dev_info(&pdev->dev, "Registered virtual clock provider.\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_virtclk_front_probe);
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 2e14dfb588f4..7d060ffe8975 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -265,7 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
/* Get configuration for the ATL instances */
snprintf(prop, sizeof(prop), "atl%u", i);
- cfg_node = of_find_node_by_name(node, prop);
+ cfg_node = of_get_child_by_name(node, prop);
if (cfg_node) {
ret = of_property_read_u32(cfg_node, "bws",
&cdesc->bws);
@@ -278,6 +278,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
cdesc->aws);
}
+ of_node_put(cfg_node);
}
cdesc->probed = true;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 1dfd1765319b..a488125e2d6b 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -235,19 +235,6 @@ config CPU_BOOST
If in doubt, say N.
-config CPU_FREQ_GOV_SCHED
- bool "'sched' cpufreq governor"
- depends on CPU_FREQ
- depends on SMP
- select CPU_FREQ_GOV_COMMON
- help
- 'sched' - this governor scales cpu frequency from the
- scheduler as a function of cpu capacity utilization. It does
- not evaluate utilization on a periodic basis (as ondemand
- does) but instead is event-driven by the scheduler.
-
- If in doubt, say N.
-
config CPU_FREQ_GOV_SCHEDUTIL
bool "'schedutil' cpufreq policy governor"
depends on CPU_FREQ && SMP
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index ffbfd1c11af9..81a9f9763915 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -848,14 +848,12 @@ failed:
void free_cluster_node(struct lpm_cluster *cluster)
{
- struct list_head *list;
int i;
+ struct lpm_cluster *cl, *m;
- list_for_each(list, &cluster->child) {
- struct lpm_cluster *n;
- n = list_entry(list, typeof(*n), list);
- list_del(list);
- free_cluster_node(n);
+ list_for_each_entry_safe(cl, m, &cluster->child, list) {
+ list_del(&cl->list);
+ free_cluster_node(cl);
};
if (cluster->cpu) {
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 72f138985e18..d83ab4bac8b1 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
int ret;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
pagefault_enable();
+ preempt_enable();
ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
return ret;
@@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
+ preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_vsx();
aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
pagefault_enable();
+ preempt_enable();
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
@@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ preempt_disable();
pagefault_disable();
enable_kernel_altivec();
enable_kernel_vsx();
@@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
&ctx->enc_key,
walk.iv);
pagefault_enable();
+ preempt_enable();
/* We need to update IV mostly for last bytes/round */
inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b8576fd6bd0e..1c7568c0055a 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -634,6 +634,7 @@ static int dmatest_func(void *data)
* free it this time?" dancing. For now, just
* leave it dangling.
*/
+ WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 8250950aab8b..66d84bcf9bbf 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1657,7 +1657,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
struct pl330_thread *thrd = NULL;
- unsigned long flags;
int chans, i;
if (pl330->state == DYING)
@@ -1665,8 +1664,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
chans = pl330->pcfg.num_chan;
- spin_lock_irqsave(&pl330->lock, flags);
-
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1684,8 +1681,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
thrd = NULL;
}
- spin_unlock_irqrestore(&pl330->lock, flags);
-
return thrd;
}
@@ -1703,7 +1698,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
static void pl330_release_channel(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330;
- unsigned long flags;
if (!thrd || thrd->free)
return;
@@ -1715,10 +1709,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
pl330 = thrd->dmac;
- spin_lock_irqsave(&pl330->lock, flags);
_free_event(thrd, thrd->ev);
thrd->free = true;
- spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
@@ -2085,20 +2077,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
- spin_lock_irqsave(&pch->lock, flags);
+ spin_lock_irqsave(&pl330->lock, flags);
dma_cookie_init(chan);
pch->cyclic = false;
pch->thread = pl330_request_channel(pl330);
if (!pch->thread) {
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
return -ENOMEM;
}
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
return 1;
}
@@ -2201,12 +2193,13 @@ static int pl330_pause(struct dma_chan *chan)
static void pl330_free_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
+ struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
tasklet_kill(&pch->task);
pm_runtime_get_sync(pch->dmac->ddma.dev);
- spin_lock_irqsave(&pch->lock, flags);
+ spin_lock_irqsave(&pl330->lock, flags);
pl330_release_channel(pch->thread);
pch->thread = NULL;
@@ -2214,7 +2207,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
if (pch->cyclic)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759d5ffc..6059d81e701a 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -813,6 +813,7 @@ static int zx_dma_probe(struct platform_device *op)
INIT_LIST_HEAD(&d->slave.channels);
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
d->slave.dev = &op->dev;
d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ca64b174f8a3..a4e1f6939c39 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1773,6 +1773,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
pvt->pci_ta = pdev;
+ break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
pvt->pci_ras = pdev;
break;
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index b24457d575c8..d071e89d3124 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -220,7 +220,7 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
}
dev_dbg(mdm->dev, "Waiting for status gpio go low\n");
status_down = false;
- end_time = jiffies + msecs_to_jiffies(10000);
+ end_time = jiffies + msecs_to_jiffies(mdm->shutdown_timeout_ms);
while (time_before(jiffies, end_time)) {
if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS))
== 0) {
@@ -1084,6 +1084,12 @@ static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
&esoc->link_info);
if (ret)
dev_info(mdm->dev, "esoc link info missing\n");
+
+ ret = of_property_read_u32(node, "qcom,shutdown-timeout-ms",
+ &mdm->shutdown_timeout_ms);
+ if (ret)
+ mdm->shutdown_timeout_ms = DEF_SHUTDOWN_TIMEOUT;
+
esoc->clink_ops = clink_ops;
esoc->parent = mdm->dev;
esoc->owner = THIS_MODULE;
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 4d8ff4968038..4be66a16a3a1 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -41,6 +41,7 @@ static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
int soft_reset_direction_assert = 0,
soft_reset_direction_de_assert = 1;
+ uint32_t reset_time_us = mdm->reset_time_ms * 1000;
if (mdm->soft_reset_inverted) {
soft_reset_direction_assert = 1;
@@ -52,9 +53,9 @@ static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
* Allow PS hold assert to be detected
*/
if (!atomic)
- usleep_range(203000, 300000);
+ usleep_range(reset_time_us, reset_time_us + 100000);
else
- mdelay(203);
+ mdelay(mdm->reset_time_ms);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_de_assert);
return 0;
@@ -212,6 +213,29 @@ static int apq8096_pon_dt_init(struct mdm_ctrl *mdm)
return 0;
}
+static int mdm9x55_pon_dt_init(struct mdm_ctrl *mdm)
+{
+ int val;
+ struct device_node *node = mdm->dev->of_node;
+ enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+
+
+ val = of_property_read_u32(node, "qcom,reset-time-ms",
+ &mdm->reset_time_ms);
+ if (val)
+ mdm->reset_time_ms = DEF_MDM9X55_RESET_TIME;
+
+ val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio",
+ 0, &flags);
+ if (val >= 0) {
+ MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ mdm->soft_reset_inverted = 1;
+ return 0;
+ } else
+ return -EIO;
+}
+
static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
{
int val;
@@ -290,7 +314,7 @@ struct mdm_pon_ops mdm9x55_pon_ops = {
.soft_reset = mdm9x55_toggle_soft_reset,
.poff_force = mdm9x55_power_down,
.cold_reset = mdm9x55_cold_reset,
- .dt_init = mdm4x_pon_dt_init,
+ .dt_init = mdm9x55_pon_dt_init,
.setup = mdm4x_pon_setup,
};
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
index 9343e49559f2..c88b2bdba30b 100644
--- a/drivers/esoc/esoc-mdm.h
+++ b/drivers/esoc/esoc-mdm.h
@@ -43,6 +43,8 @@
#define MDM_MODEM_TIMEOUT 3000
#define DEF_RAMDUMP_TIMEOUT 120000
#define DEF_RAMDUMP_DELAY 2000
+#define DEF_SHUTDOWN_TIMEOUT 10000
+#define DEF_MDM9X55_RESET_TIME 203
#define RD_BUF_SIZE 100
#define SFR_MAX_RETRIES 10
#define SFR_RETRY_INTERVAL 1000
@@ -96,6 +98,8 @@ struct mdm_ctrl {
bool debug_fail;
unsigned int dump_timeout_ms;
unsigned int ramdump_delay_ms;
+ unsigned int shutdown_timeout_ms;
+ unsigned int reset_time_ms;
struct esoc_clink *esoc;
bool get_restart_reason;
unsigned long irq_mask;
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 93c30a885740..aa2f6bb82b32 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -190,6 +190,11 @@ static int palmas_usb_probe(struct platform_device *pdev)
struct palmas_usb *palmas_usb;
int status;
+ if (!palmas) {
+ dev_err(&pdev->dev, "failed to get valid parent\n");
+ return -EINVAL;
+ }
+
palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
if (!palmas_usb)
return -ENOMEM;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index c51f3b2fe3c0..20451c290233 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -327,38 +327,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
return end;
}
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM. So, look it up in the existing EFI memory map instead. Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
- */
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
-{
- struct efi_memory_map *map;
- void *p;
- map = efi.memmap;
- if (!map)
- return NULL;
- if (WARN_ON(!map->map))
- return NULL;
- for (p = map->map; p < map->map_end; p += map->desc_size) {
- efi_memory_desc_t *md = p;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = md->phys_addr + size;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
- if (!md->virt_addr)
- continue;
- if (phys_addr >= md->phys_addr && phys_addr < end) {
- phys_addr += md->virt_addr - md->phys_addr;
- return (__force void __iomem *)(unsigned long)phys_addr;
- }
- }
- return NULL;
-}
-
static __initdata efi_config_table_type_t common_tables[] = {
{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d775e2bfc017..9d8b2e59b755 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index d5aa1d16154f..832df3c58e2f 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -18,8 +18,6 @@
#include "efistub.h"
-bool __nokaslr;
-
static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
{
static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
@@ -221,18 +219,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail;
}
- /* check whether 'nokaslr' was passed on the command line */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- static const u8 default_cmdline[] = CONFIG_CMDLINE;
- const u8 *str, *cmdline = cmdline_ptr;
-
- if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
- cmdline = default_cmdline;
- str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
- __nokaslr = true;
- }
-
status = handle_kernel_image(sys_table, image_addr, &image_size,
&reserve_addr,
&reserve_size,
@@ -242,9 +228,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail_free_cmdline;
}
- status = efi_parse_options(cmdline_ptr);
- if (status != EFI_SUCCESS)
- pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ cmdline_size == 0)
+ efi_parse_options(CONFIG_CMDLINE);
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
+ efi_parse_options(cmdline_ptr);
/*
* Unauthenticated device tree data is a security hazard, so
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 556d05547670..e33593ed8f84 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -23,8 +23,6 @@
#include "efistub.h"
-extern bool __nokaslr;
-
efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
unsigned long *image_addr,
unsigned long *image_size,
@@ -40,7 +38,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
u64 phys_seed = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- if (!__nokaslr) {
+ if (!nokaslr()) {
status = efi_get_random_bytes(sys_table_arg,
sizeof(phys_seed),
(u8 *)&phys_seed);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 29ed2f9b218c..58539c1280a5 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -41,6 +41,13 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
+static int __section(.data) __nokaslr;
+
+int __pure nokaslr(void)
+{
+ return __nokaslr;
+}
+
struct file_info {
efi_file_handle_t *handle;
u64 size;
@@ -313,10 +320,14 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
* environments, first in the early boot environment of the EFI boot
* stub, and subsequently during the kernel boot.
*/
-efi_status_t efi_parse_options(char *cmdline)
+efi_status_t efi_parse_options(char const *cmdline)
{
char *str;
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
+ __nokaslr = 1;
+
/*
* If no EFI parameters were specified on the cmdline we've got
* nothing to do.
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 5ed3d3f38166..a5eaa3ac0a5d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,6 +5,8 @@
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
+extern int __pure nokaslr(void);
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f4554b39d5d9..021d8391abea 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -267,3 +267,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
+
+source "drivers/gpu/drm/msm-hyp/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1e9ff4c3e3db..ea0d5ea57213 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_MSM_HYP) += msm-hyp/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f4cae5357e40..3e90ddcbb24a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1575,34 +1575,32 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
*/
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index bb0da76051a1..e5da6f19b9b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -284,6 +284,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work,
@@ -315,6 +319,11 @@ static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+
}
}
}
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ffd673615772..26412d2f8c98 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -4,3 +4,5 @@ armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
obj-$(CONFIG_DRM_ARMADA) := armada.o
+
+CFLAGS_armada_trace.o := -I$(src)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7dd6728dd092..ccc2044af831 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -312,7 +312,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type)
ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- return ret;
+ goto err_debugfs;
}
ret = device_add(minor->kdev);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 6e4dd62d4ed9..db1f2a738eb2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -378,14 +378,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (adj_start < start)
- adj_start = start;
- if (adj_end > end)
- adj_end = end;
-
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+ adj_start = max(adj_start, start);
+ adj_end = min(adj_end, end);
+
if (flags & DRM_MM_CREATE_TOP)
adj_start = adj_end - size;
@@ -541,6 +539,9 @@ static struct drm_mm_node *get_first_hole(const struct drm_mm *mm,
if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
struct rb_node *node = rb_first(&mm->holes_tree);
+ if (!node)
+ return NULL;
+
return rb_entry(node, struct drm_mm_node, hole_node);
} else if (flags & DRM_MM_SEARCH_BELOW) {
return list_entry((mm)->hole_stack.prev,
@@ -555,8 +556,12 @@ static struct drm_mm_node *get_next_hole(struct drm_mm_node *entry,
enum drm_mm_search_flags flags)
{
if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
- return rb_entry(rb_next(&entry->hole_node),
- struct drm_mm_node, hole_node);
+ struct rb_node *node = rb_next(&entry->hole_node);
+
+ if (!node)
+ return NULL;
+
+ return rb_entry(node, struct drm_mm_node, hole_node);
} else if (flags & DRM_MM_SEARCH_BELOW) {
return list_entry(entry->hole_stack.prev,
struct drm_mm_node, hole_stack);
@@ -650,17 +655,15 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
adj_end = drm_mm_hole_node_end(entry);
hole_size = adj_end - adj_start;
- if (adj_start < start)
- adj_start = start;
- if (adj_end > end)
- adj_end = end;
-
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
+ adj_start = max(adj_start, start);
+ adj_end = min(adj_end, end);
+
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index fbe1b3174f75..34cebcdc2fc4 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -180,6 +180,8 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
/* enable output and display signal */
decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
+
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5044f2257e89..6fca39e1c419 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3475,11 +3475,6 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
return VGACNTRL;
}
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f56af0aaafde..659b90657f36 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -324,7 +324,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
- char __user *user_data = to_user_ptr(args->data_ptr);
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
int ret = 0;
/* We manually control the domain here and pretend that it
@@ -605,7 +605,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
int needs_clflush = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -692,7 +692,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_WRITE,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
@@ -783,7 +783,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (ret)
goto out_unpin;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
@@ -907,7 +907,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int needs_clflush_before = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -1036,12 +1036,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_READ,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
if (likely(!i915.prefault_disable)) {
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
args->size);
if (ret)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 201947b4377c..8800f410b2d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -492,7 +492,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
- user_relocs = to_user_ptr(entry->relocs_ptr);
+ user_relocs = u64_to_user_ptr(entry->relocs_ptr);
remain = entry->relocation_count;
while (remain) {
@@ -831,7 +831,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
u64 invalid_offset = (u64)-1;
int j;
- user_relocs = to_user_ptr(exec[i].relocs_ptr);
+ user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
@@ -975,7 +975,7 @@ validate_exec_list(struct drm_device *dev,
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
for (i = 0; i < count; i++) {
- char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
+ char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
int length; /* limited by fault_in_pages_readable() */
if (exec[i].flags & invalid_flags)
@@ -1633,7 +1633,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1669,7 +1669,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
struct drm_i915_gem_exec_object __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++) {
@@ -1721,7 +1721,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1734,7 +1734,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
struct drm_i915_gem_exec_object2 __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
int i;
for (i = 0; i < args->buffer_count; i++) {
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index f3bee54c414f..cb4313c68f71 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -440,7 +440,9 @@ static bool
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
- !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ msgs[i].addr == msgs[i + 1].addr &&
+ !(msgs[i].flags & I2C_M_RD) &&
+ (msgs[i].len == 1 || msgs[i].len == 2) &&
(msgs[i + 1].flags & I2C_M_RD));
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index b1a0f5656175..44df959cbadb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -145,6 +145,8 @@ static int mga_vram_init(struct mga_device *mdev)
}
mem = pci_iomap(mdev->dev->pdev, 0, 0);
+ if (!mem)
+ return -ENOMEM;
mdev->mc.vram_size = mga_probe_vram(mdev, mem);
diff --git a/drivers/gpu/drm/msm-hyp/Kconfig b/drivers/gpu/drm/msm-hyp/Kconfig
new file mode 100644
index 000000000000..676c0174c0ee
--- /dev/null
+++ b/drivers/gpu/drm/msm-hyp/Kconfig
@@ -0,0 +1,15 @@
+#
+# Drm MSM hypervisor configuration
+#
+# This driver provides support for the User Space DRM Masters
+#
+config DRM_MSM_HYP
+ tristate "MSM DRM HYP"
+ depends on DRM
+ depends on MSM_GVM_QUIN
+ depends on OF
+ default y
+ help
+ DRM/KMS driver for MSM/snapdragon in Guest VM mode. This driver registers
+ with DRM framework to create /dev/dri/card0 path and issue events to User
+ Space listeners.
diff --git a/drivers/gpu/drm/msm-hyp/Makefile b/drivers/gpu/drm/msm-hyp/Makefile
new file mode 100644
index 000000000000..dbf54f0e58c7
--- /dev/null
+++ b/drivers/gpu/drm/msm-hyp/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+
+obj-y := \
+ msm_drv_hyp.o
diff --git a/drivers/gpu/drm/msm-hyp/NOTES b/drivers/gpu/drm/msm-hyp/NOTES
new file mode 100644
index 000000000000..9571da75b604
--- /dev/null
+++ b/drivers/gpu/drm/msm-hyp/NOTES
@@ -0,0 +1,8 @@
+NOTES about msm drm/kms hyp driver:
+
+This driver registers with drm framework for the purpose of creating the
+/dev/dri/card0 path, which User Space DRM Masters rely on.
+Furthermore, per-CRTC VBLANK and PAGE_FLIP events are queued to the device
+path to notify User Space component listeners.
+
+No other IOCTL or HW support is provided through this driver. \ No newline at end of file
diff --git a/drivers/gpu/drm/msm-hyp/msm_drv_hyp.c b/drivers/gpu/drm/msm-hyp/msm_drv_hyp.c
new file mode 100644
index 000000000000..7dd817e41ddd
--- /dev/null
+++ b/drivers/gpu/drm/msm-hyp/msm_drv_hyp.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include "msm_drv_hyp.h"
+
+/*
+ * DRM operations:
+ */
+
+static int msm_unload(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ dev->dev_private = NULL;
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct msm_drm_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev->dev_private = priv;
+
+ return 0;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->driver_priv = ctx;
+
+ return 0;
+}
+
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ if (ctx == priv->lastctx)
+ priv->lastctx = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(ctx);
+}
+
+static struct drm_pending_vblank_event *create_vblank_event(
+ struct drm_device *dev, struct drm_file *file_priv, u32 type,
+ uint64_t user_data)
+{
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file_priv->event_space < sizeof(e->event)) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file_priv->event_space -= sizeof(e->event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = type;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+out:
+ return e;
+}
+
+struct event_req {
+ u32 type;
+ u64 user_data;
+};
+
+static ssize_t msm_drm_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct event_req e_req;
+ struct drm_pending_vblank_event *e;
+ unsigned long flags;
+ int ret = 0;
+
+ if (count != sizeof(struct event_req))
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buffer, count))
+ return -EFAULT;
+
+ ret = copy_from_user(&e_req, buffer, sizeof(e_req));
+ if (ret)
+ return -EFAULT;
+
+ if (!(e_req.type & DRM_EVENT_VBLANK) &&
+ !(e_req.type & DRM_EVENT_FLIP_COMPLETE))
+ return -EINVAL;
+
+ e = create_vblank_event(dev, file_priv, e_req.type, e_req.user_data);
+ if (!e)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_send_vblank_event(dev, 2, e);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return count;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .write = msm_drm_write,
+ .llseek = no_llseek,
+};
+
+static struct drm_driver msm_driver = {
+ .driver_features = 0,
+ .load = msm_load,
+ .unload = msm_unload,
+ .open = msm_open,
+ .preclose = msm_preclose,
+ .set_busid = drm_platform_set_busid,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .num_ioctls = 0,
+ .fops = &fops,
+ .name = "msm_drm_hyp",
+ .desc = "MSM Snapdragon DRM",
+ .date = "20170831",
+ .major = 1,
+ .minor = 0,
+};
+
+/*
+ * Platform driver:
+ */
+
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&msm_driver, pdev);
+}
+
+static int msm_pdev_remove(struct platform_device *pdev)
+{
+ drm_put_dev(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static const struct platform_device_id msm_id[] = {
+ { "mdp-hyp", 0 },
+ { }
+};
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,sde-kms-hyp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_pdev_probe,
+ .remove = msm_pdev_remove,
+ .driver = {
+ .name = "msm_drm_hyp",
+ .of_match_table = dt_match,
+ },
+ .id_table = msm_id,
+};
+
+static int __init msm_drm_register(void)
+{
+ DBG("init");
+ return platform_driver_register(&msm_platform_driver);
+}
+
+static void __exit msm_drm_unregister(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&msm_platform_driver);
+}
+
+module_init(msm_drm_register);
+module_exit(msm_drm_unregister);
+
+MODULE_DESCRIPTION("MSM DRM Mini Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/msm-hyp/msm_drv_hyp.h b/drivers/gpu/drm/msm-hyp/msm_drv_hyp.h
new file mode 100644
index 000000000000..affce322ba06
--- /dev/null
+++ b/drivers/gpu/drm/msm-hyp/msm_drv_hyp.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DRV_HYP_H__
+#define __MSM_DRV_HYP_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/kthread.h>
+
+#include <drm/drmP.h>
+
+struct msm_file_private {
+ /* currently we don't do anything useful with this.. but when
+ * per-context address spaces are supported we'd keep track of
+ * the context's page-tables here.
+ */
+ int dummy;
+};
+
+enum msm_mdp_display_id {
+ DISPLAY_ID_NONE,
+ DISPLAY_ID_PRIMARY,
+ DISPLAY_ID_SECONDARY,
+ DISPLAY_ID_TERTIARY,
+ DISPLAY_ID_QUATERNARY,
+ DISPLAY_ID_MAX
+};
+
+struct msm_drm_private {
+ struct msm_file_private *lastctx;
+};
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
+
+static inline enum msm_mdp_display_id msm_get_display_id(
+ const char *display_type)
+{
+ if (!display_type)
+ return DISPLAY_ID_NONE;
+ else if (!strcmp(display_type, "primary"))
+ return DISPLAY_ID_PRIMARY;
+ else if (!strcmp(display_type, "secondary"))
+ return DISPLAY_ID_SECONDARY;
+ else if (!strcmp(display_type, "tertiary"))
+ return DISPLAY_ID_TERTIARY;
+ else if (!strcmp(display_type, "quaternary"))
+ return DISPLAY_ID_QUATERNARY;
+ else
+ return DISPLAY_ID_NONE;
+}
+
+/* for the generated headers: */
+#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
+
+/* for conditionally setting boolean flag(s): */
+#define COND(bool, val) ((bool) ? (val) : 0)
+
+#endif /* __MSM_DRV_HYP_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index d397c44f1203..16b10b608855 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -61,6 +61,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_NR_RINGS:
*value = gpu->nr_rings;
return 0;
+ case MSM_PARAM_GPU_HANG_TIMEOUT:
+ *value = DRM_MSM_HANGCHECK_PERIOD;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -394,14 +397,6 @@ static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
{
struct device_node *child;
- gpu->active_level = 1;
-
- /* The device tree will tell us the best clock to initialize with */
- of_property_read_u32(node, "qcom,initial-pwrlevel", &gpu->active_level);
-
- if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
- gpu->active_level = 1;
-
for_each_child_of_node(node, child) {
unsigned int index;
@@ -450,6 +445,15 @@ static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct device_node *node, *child;
+ gpu->active_level = 1;
+
+ /* The device tree will tell us the best clock to initialize with */
+ of_property_read_u32(parent, "qcom,initial-pwrlevel",
+ &gpu->active_level);
+
+ if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
+ gpu->active_level = 1;
+
/* See if the target has defined a number of power bins */
node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
if (!node) {
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index f70a0ea022d8..35ba396e1cd1 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -1342,56 +1342,7 @@ fail:
return ret;
}
-int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi)
-{
- struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
- const struct hdmi_platform_config *config = hdmi->config;
- struct device *dev = &hdmi->pdev->dev;
- int i, ret;
- struct drm_connector *connector;
- struct msm_drm_private *priv;
- struct sde_kms *sde_kms;
-
- connector = hdmi->connector;
- priv = connector->dev->dev_private;
- sde_kms = to_sde_kms(priv->kms);
-
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_enable(hdmi->hpd_regs[i]);
- if (ret) {
- SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- }
- }
-
- ret = pinctrl_pm_select_default_state(dev);
- if (ret)
- SDE_ERROR("pinctrl state chg failed: %d\n", ret);
-
- ret = _sde_hdmi_gpio_config(hdmi, true);
- if (ret)
- SDE_ERROR("failed to configure GPIOs: %d\n", ret);
-
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- pr_warn("failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
- }
-
- return ret;
-}
-
-static void _sde_hdmi_hpd_disable(struct sde_hdmi *sde_hdmi)
+static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
{
struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
const struct hdmi_platform_config *config = hdmi->config;
@@ -1422,12 +1373,6 @@ static void _sde_hdmi_hpd_disable(struct sde_hdmi *sde_hdmi)
}
}
-void sde_hdmi_core_disable(struct sde_hdmi *sde_hdmi)
-{
- /* HPD contains all the core clock and pwr */
- _sde_hdmi_hpd_disable(sde_hdmi);
-}
-
static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display)
{
struct edid *edid = display->edid_ctrl->edid;
@@ -2272,8 +2217,7 @@ int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
return -EINVAL;
}
- if (!sde_hdmi->non_pluggable)
- _sde_hdmi_hpd_disable(sde_hdmi);
+ _sde_hdmi_hdp_disable(sde_hdmi);
return 0;
}
@@ -2445,14 +2389,9 @@ int sde_hdmi_connector_post_init(struct drm_connector *connector,
INIT_WORK(&sde_hdmi->hpd_work, _sde_hdmi_hotplug_work);
/* Enable HPD detection */
- if (!sde_hdmi->non_pluggable) {
- rc = _sde_hdmi_hpd_enable(sde_hdmi);
- if (rc)
- SDE_ERROR("failed to enable HPD: %d\n", rc);
- } else {
- /* Disable HPD interrupt */
- hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
- }
+ rc = _sde_hdmi_hpd_enable(sde_hdmi);
+ if (rc)
+ SDE_ERROR("failed to enable HPD: %d\n", rc);
_sde_hdmi_get_tx_version(sde_hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 471472ea23cf..865998c6a126 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -302,22 +302,6 @@ sde_hdmi_connector_detect(struct drm_connector *connector,
void *display);
/**
- * sde_hdmi_core_enable()- turn on clk and pwr for hdmi core
- * @sde_hdmi: Pointer to sde_hdmi structure
- *
- * Return: error code
- */
-int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi);
-
-/**
- * sde_hdmi_core_disable()- turn off clk and pwr for hdmi core
- * @sde_hdmi: Pointer to sde_hdmi structure
- *
- * Return: none
- */
-void sde_hdmi_core_disable(struct sde_hdmi *sde_hdmi);
-
-/**
* sde_hdmi_connector_get_modes - add drm modes via drm_mode_probed_add()
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 01283aa30450..5fbe4767ad3a 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -106,8 +106,6 @@ struct sde_hdmi_bridge {
#define HDMI_AVI_IFRAME_LINE_NUMBER 1
#define HDMI_VENDOR_IFRAME_LINE_NUMBER 3
-static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
- struct drm_display_mode *mode);
void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
}
@@ -131,8 +129,6 @@ static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
int i, ret;
for (i = 0; i < config->pwr_reg_cnt; i++) {
@@ -159,13 +155,6 @@ static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
config->pwr_clk_names[i], ret);
}
}
-
- if (display->non_pluggable) {
- ret = sde_hdmi_core_enable(display);
- if (ret)
- SDE_ERROR("failed to enable hpd clks: %d\n", ret);
- }
- _sde_hdmi_bridge_setup_scrambler(hdmi, &display->mode);
}
static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
@@ -173,8 +162,6 @@ static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
int i, ret;
/* Wait for vsync */
@@ -190,9 +177,6 @@ static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
config->pwr_reg_names[i], ret);
}
}
-
- if (display->non_pluggable)
- sde_hdmi_core_disable(display);
}
static int _sde_hdmi_bridge_ddc_clear_irq(struct hdmi *hdmi,
@@ -504,9 +488,6 @@ static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
- /* turn on scrambler, scrambler was skipped if HDMI is off */
- _sde_hdmi_bridge_setup_scrambler(hdmi, &display->mode);
-
mutex_lock(&display->display_lock);
if (display->codec_ready)
sde_hdmi_notify_clients(display, display->connected);
@@ -915,8 +896,7 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
_sde_hdmi_save_mode(hdmi, mode);
- if (hdmi->power_on)
- _sde_hdmi_bridge_setup_scrambler(hdmi, mode);
+ _sde_hdmi_bridge_setup_scrambler(hdmi, mode);
_sde_hdmi_bridge_setup_deep_color(hdmi);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d66071672c62..3610c8fca5f3 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -585,7 +585,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
obj_remove_domain(domain);
mutex_unlock(&msm_obj->lock);
- return 0;
+ return ret;
}
/* get iova without taking a reference, used in places where you have
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index f2b6aa29b410..2e528b112e1f 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -29,19 +29,14 @@
#define BO_LOCKED 0x4000
#define BO_PINNED 0x2000
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gem_address_space *aspace,
uint32_t nr_bos, uint32_t nr_cmds,
struct msm_gpu_submitqueue *queue)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -107,7 +102,7 @@ static int submit_lookup_objects(struct msm_gpu *gpu,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
void __user *userptr =
- to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
if (copy_from_user_inatomic(&submit_bo, userptr,
sizeof(submit_bo))) {
@@ -362,7 +357,7 @@ static int submit_reloc(struct msm_gpu *gpu,
for (i = 0; i < nr_relocs; i++) {
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
- to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+ u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
uint64_t iova;
uint32_t off;
bool valid;
@@ -473,7 +468,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
for (i = 0; i < args->nr_cmds; i++) {
struct drm_msm_gem_submit_cmd submit_cmd;
void __user *userptr =
- to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+ u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
uint64_t iova;
size_t size;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index f399d24019e4..a5a768a63858 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -60,7 +60,7 @@ msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
if (aspace->va_len)
drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
- (end >> PAGE_SHIFT) - 1);
+ (aspace->va_len >> PAGE_SHIFT));
kref_init(&aspace->kref);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 44d9784d1bd7..50dd710aa510 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -526,8 +526,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
list_add_tail(&submit->node, &ring->submits);
- msm_rd_dump_submit(submit);
-
ring->submitted_fence = submit->fence;
submit->tick_index = ring->tick_index;
@@ -556,6 +554,8 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* ring takes a reference to the bo and iova: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova(&msm_obj->base, aspace, &iova);
+
+ submit->bos[i].iova = iova;
}
if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -564,6 +564,8 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
+ msm_rd_dump_submit(submit);
+
gpu->funcs->submit(gpu, submit);
hangcheck_timer_reset(gpu);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 270e79a774b2..46e2a13cecc4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -21,6 +21,9 @@
(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
(0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
+
#define CTL_TOP 0x014
#define CTL_FLUSH 0x018
#define CTL_START 0x01C
@@ -315,8 +318,12 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
int i;
for (i = 0; i < ctx->mixer_count; i++) {
- SDE_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
- SDE_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+ int mixer_id = ctx->mixer_hw_caps[i].id;
+
+ SDE_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index ab65283ceafc..a26188f9e8e9 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -550,12 +550,13 @@ static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
}
#else
static int sde_power_data_bus_parse(struct platform_device *pdev,
- struct sde_power_handle *phandle)
+ struct sde_power_data_bus_handle *pdbus)
{
return 0;
}
-static void sde_power_data_bus_unregister(u32 reg_bus_hdl)
+static void sde_power_data_bus_unregister(
+ struct sde_power_data_bus_handle *pdbus)
{
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f97b73ec4713..f418c002d323 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -352,6 +352,7 @@ static int panel_simple_remove(struct device *dev)
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
+ panel_simple_unprepare(&panel->base);
if (panel->ddc)
put_device(&panel->ddc->dev);
@@ -367,6 +368,7 @@ static void panel_simple_shutdown(struct device *dev)
struct panel_simple *panel = dev_get_drvdata(dev);
panel_simple_disable(&panel->base);
+ panel_simple_unprepare(&panel->base);
}
static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index b5760851195c..0c6216a6ee9e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
/***** radeon AUX functions *****/
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
*/
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 26da2f4d7b4f..a2937a693591 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -226,7 +226,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
}
info->par = rfbdev;
- info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index d56630c60039..117a2f52fb4e 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -346,6 +346,10 @@ static int vtg_probe(struct platform_device *pdev)
return -ENOMEM;
}
vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!vtg->regs) {
+ DRM_ERROR("failed to remap I/O memory\n");
+ return -ENOMEM;
+ }
np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
if (np) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f3f31f995878..be3971b22a02 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -708,7 +708,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d7e3af671543..d8803c3bbfdc 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -80,6 +80,7 @@
#define ICIER_TEIE 0x40
#define ICIER_RIE 0x20
#define ICIER_NAKIE 0x10
+#define ICIER_SPIE 0x08
#define ICSR2_NACKF 0x10
@@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
return IRQ_NONE;
}
- if (riic->is_last || riic->err)
+ if (riic->is_last || riic->err) {
+ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
-
- writeb(0, riic->base + RIIC_ICIER);
- complete(&riic->msg_done);
+ }
return IRQ_HANDLED;
}
@@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
if (riic->bytes_left == 1) {
/* STOP must come before we set ACKBT! */
- if (riic->is_last)
+ if (riic->is_last) {
+ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ }
riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
- writeb(0, riic->base + RIIC_ICIER);
- complete(&riic->msg_done);
} else {
riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
}
@@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t riic_stop_isr(int irq, void *data)
+{
+ struct riic_dev *riic = data;
+
+ /* read back registers to confirm writes have fully propagated */
+ writeb(0, riic->base + RIIC_ICSR2);
+ readb(riic->base + RIIC_ICSR2);
+ writeb(0, riic->base + RIIC_ICIER);
+ readb(riic->base + RIIC_ICIER);
+
+ complete(&riic->msg_done);
+
+ return IRQ_HANDLED;
+}
+
static u32 riic_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
@@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
+ { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
};
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index fe89b6823217..263e97235ea0 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -119,7 +119,7 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
if (ret < 0)
dev_err(&chip->client->dev, "Error writing reg_cmd\n");
- return 0;
+ return ret;
}
/**
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index 572bc6f02ca8..e18f12b74610 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -58,7 +58,7 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev)
trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
if (!trig_info) {
ret = -ENOMEM;
- goto error_put_trigger;
+ goto error_free_trigger;
}
iio_trigger_set_drvdata(trig, trig_info);
trig_info->irq = irq;
@@ -83,8 +83,8 @@ error_release_irq:
free_irq(irq, trig);
error_free_trig_info:
kfree(trig_info);
-error_put_trigger:
- iio_trigger_put(trig);
+error_free_trigger:
+ iio_trigger_free(trig);
error_ret:
return ret;
}
@@ -99,7 +99,7 @@ static int iio_interrupt_trigger_remove(struct platform_device *pdev)
iio_trigger_unregister(trig);
free_irq(trig_info->irq, trig);
kfree(trig_info);
- iio_trigger_put(trig);
+ iio_trigger_free(trig);
return 0;
}
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 3dfab2bc6d69..202e8b89caf2 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -174,7 +174,7 @@ static int iio_sysfs_trigger_probe(int id)
return 0;
out2:
- iio_trigger_put(t->trig);
+ iio_trigger_free(t->trig);
free_t:
kfree(t);
out1:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2018d24344de..f74b11542603 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1373,7 +1373,7 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
- list_del(&p->list);
+ list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index e397f1b0af09..9a99cee2665a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -670,12 +670,19 @@ static void srp_path_rec_completion(int status,
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
- int ret;
+ int ret = -ENODEV;
ch->path.numb_path = 1;
init_completion(&ch->done);
+ /*
+ * Avoid that the SCSI host can be removed by srp_remove_target()
+ * before srp_path_rec_completion() is called.
+ */
+ if (!scsi_host_get(target->scsi_host))
+ goto out;
+
ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
@@ -689,18 +696,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
GFP_KERNEL,
srp_path_rec_completion,
ch, &ch->path_query);
- if (ch->path_query_id < 0)
- return ch->path_query_id;
+ ret = ch->path_query_id;
+ if (ret < 0)
+ goto put;
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
- return ret;
+ goto put;
- if (ch->status < 0)
+ ret = ch->status;
+ if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed\n");
- return ch->status;
+put:
+ scsi_host_put(target->scsi_host);
+
+out:
+ return ret;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index eaabf3125846..c52131233ba7 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3425,7 +3425,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
const char *p;
unsigned len, count, leading_zero_bytes;
- int ret, rc;
+ int ret;
p = name;
if (strncasecmp(p, "0x", 2) == 0)
@@ -3437,10 +3437,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
count = min(len / 2, 16U);
leading_zero_bytes = 16 - count;
memset(i_port_id, 0, leading_zero_bytes);
- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
- if (rc < 0)
- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
- ret = 0;
+ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+ if (ret < 0)
+ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
out:
return ret;
}
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 0fd612dd76ed..aaf43befffaa 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -87,7 +87,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
struct mpr121_touchkey *mpr121 = dev_id;
struct i2c_client *client = mpr121->client;
struct input_dev *input = mpr121->input_dev;
- unsigned int key_num, key_val, pressed;
+ unsigned long bit_changed;
+ unsigned int key_num;
int reg;
reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
@@ -105,18 +106,22 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
reg &= TOUCH_STATUS_MASK;
/* use old press bit to figure out which bit changed */
- key_num = ffs(reg ^ mpr121->statusbits) - 1;
- pressed = reg & (1 << key_num);
+ bit_changed = reg ^ mpr121->statusbits;
mpr121->statusbits = reg;
+ for_each_set_bit(key_num, &bit_changed, mpr121->keycount) {
+ unsigned int key_val, pressed;
- key_val = mpr121->keycodes[key_num];
+ pressed = reg & BIT(key_num);
+ key_val = mpr121->keycodes[key_num];
- input_event(input, EV_MSC, MSC_SCAN, key_num);
- input_report_key(input, key_val, pressed);
- input_sync(input);
+ input_event(input, EV_MSC, MSC_SCAN, key_num);
+ input_report_key(input, key_val, pressed);
+
+ dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
+ pressed ? "pressed" : "released");
- dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
- pressed ? "pressed" : "released");
+ }
+ input_sync(input);
out:
return IRQ_HANDLED;
@@ -231,6 +236,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_dev->keycode = mpr121->keycodes;
input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index f4e8fbec6a94..b5304e264881 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return NULL;
}
- while (buflen > 0) {
+ while (buflen >= sizeof(*union_desc)) {
union_desc = (struct usb_cdc_union_desc *)buf;
+ if (union_desc->bLength > buflen) {
+ dev_err(&intf->dev, "Too large descriptor\n");
+ return NULL;
+ }
+
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
- return union_desc;
+
+ if (union_desc->bLength >= sizeof(*union_desc))
+ return union_desc;
+
+ dev_err(&intf->dev,
+ "Union descriptor to short (%d vs %zd\n)",
+ union_desc->bLength, sizeof(*union_desc));
+ return NULL;
}
buflen -= union_desc->bLength;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 681dce15fbc8..c9d491bc85e0 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN060C", 0 },
+ { "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 7c18249d6c8e..8b68a210277b 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -231,13 +231,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
/* Walk this report and pull out the info we need */
while (i < length) {
- prefix = report[i];
-
- /* Skip over prefix */
- i++;
+ prefix = report[i++];
/* Determine data size and save the data in the proper variable */
- size = PREF_SIZE(prefix);
+ size = (1U << PREF_SIZE(prefix)) >> 1;
+ if (i + size > length) {
+ dev_err(ddev,
+ "Not enough data (need %d, have %d)\n",
+ i + size, length);
+ break;
+ }
+
switch (size) {
case 1:
data = report[i];
@@ -245,8 +249,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case 2:
data16 = get_unaligned_le16(&report[i]);
break;
- case 3:
- size = 4;
+ case 4:
data32 = get_unaligned_le32(&report[i]);
break;
}
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 00df3832faab..64f1eb8fdcbc 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1033,13 +1033,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}
- /* Nuke the existing Config, as we're going to rewrite it */
- val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
- if (ste->valid)
- val |= STRTAB_STE_0_V;
- else
- val &= ~STRTAB_STE_0_V;
+ /* Nuke the existing STE_0 value, as we're going to rewrite it */
+ val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1068,7 +1063,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
-
}
if (ste->s2_cfg) {
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 15c931bbbf65..de13d3367648 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -731,7 +731,8 @@ static int qpnp_flash_led_get_voltage_headroom(struct qpnp_flash_led *led)
#define FLASH_VDIP_MARGIN 50000
#define BOB_EFFICIENCY 900LL
#define VIN_FLASH_MIN_UV 3300000LL
-static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led)
+static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led,
+ int *max_current)
{
int ocv_uv = 0, rbatt_uohm = 0, ibat_now = 0, voltage_hdrm_mv = 0;
int rc = 0;
@@ -747,8 +748,10 @@ static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led)
}
/* If no battery is connected, return max possible flash current */
- if (!rbatt_uohm)
- return FLASH_LED_MAX_TOTAL_CURRENT_MA;
+ if (!rbatt_uohm) {
+ *max_current = FLASH_LED_MAX_TOTAL_CURRENT_MA;
+ return 0;
+ }
rc = get_property_from_fg(led, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv);
if (rc < 0) {
@@ -785,7 +788,7 @@ static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led)
/* Wait for LMH mitigation to take effect */
udelay(100);
- return qpnp_flash_led_calc_max_current(led);
+ return qpnp_flash_led_calc_max_current(led, max_current);
}
/*
@@ -825,13 +828,14 @@ static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led)
avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV);
pr_debug("avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d, trigger_lmh=%d\n",
avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm, led->trigger_lmh);
- return min(FLASH_LED_MAX_TOTAL_CURRENT_MA,
+ *max_current = min(FLASH_LED_MAX_TOTAL_CURRENT_MA,
(int)(div64_s64(avail_flash_ua, MCONV)));
+ return 0;
}
-static int qpnp_flash_led_calc_thermal_current_lim(struct qpnp_flash_led *led)
+static int qpnp_flash_led_calc_thermal_current_lim(struct qpnp_flash_led *led,
+ int *thermal_current_lim)
{
- int thermal_current_lim = 0;
int rc;
u8 thermal_thrsh1, thermal_thrsh2, thermal_thrsh3, otst_status;
@@ -888,7 +892,7 @@ static int qpnp_flash_led_calc_thermal_current_lim(struct qpnp_flash_led *led)
/* Look up current limit based on THERMAL_OTST status */
if (otst_status)
- thermal_current_lim =
+ *thermal_current_lim =
led->pdata->thermal_derate_current[otst_status >> 1];
/* Restore THERMAL_THRESHx registers to original values */
@@ -913,23 +917,36 @@ static int qpnp_flash_led_calc_thermal_current_lim(struct qpnp_flash_led *led)
if (rc < 0)
return rc;
- return thermal_current_lim;
+ return 0;
}
-static int qpnp_flash_led_get_max_avail_current(struct qpnp_flash_led *led)
+static int qpnp_flash_led_get_max_avail_current(struct qpnp_flash_led *led,
+ int *max_avail_current)
{
- int max_avail_current, thermal_current_lim = 0;
+ int thermal_current_lim = 0, rc;
led->trigger_lmh = false;
- max_avail_current = qpnp_flash_led_calc_max_current(led);
- if (led->pdata->thermal_derate_en)
- thermal_current_lim =
- qpnp_flash_led_calc_thermal_current_lim(led);
+ rc = qpnp_flash_led_calc_max_current(led, max_avail_current);
+ if (rc < 0) {
+ pr_err("Couldn't calculate max_avail_current, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (led->pdata->thermal_derate_en) {
+ rc = qpnp_flash_led_calc_thermal_current_lim(led,
+ &thermal_current_lim);
+ if (rc < 0) {
+ pr_err("Couldn't calculate thermal_current_lim, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
if (thermal_current_lim)
- max_avail_current = min(max_avail_current, thermal_current_lim);
+ *max_avail_current = min(*max_avail_current,
+ thermal_current_lim);
- return max_avail_current;
+ return 0;
}
static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode)
@@ -1237,12 +1254,11 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
}
if (options & QUERY_MAX_CURRENT) {
- rc = qpnp_flash_led_get_max_avail_current(led);
+ rc = qpnp_flash_led_get_max_avail_current(led, max_current);
if (rc < 0) {
pr_err("query max current failed, rc=%d\n", rc);
return rc;
}
- *max_current = rc;
}
return 0;
@@ -1291,7 +1307,7 @@ static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int rc;
+ int rc, max_current = 0;
struct flash_switch_data *snode;
struct qpnp_flash_led *led;
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -1299,11 +1315,11 @@ static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
snode = container_of(led_cdev, struct flash_switch_data, cdev);
led = dev_get_drvdata(&snode->pdev->dev);
- rc = qpnp_flash_led_get_max_avail_current(led);
+ rc = qpnp_flash_led_get_max_avail_current(led, &max_current);
if (rc < 0)
pr_err("query max current failed, rc=%d\n", rc);
- return snprintf(buf, PAGE_SIZE, "%d\n", rc);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_current);
}
/* sysfs attributes exported by flash_led */
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8eeab72b93e2..4d46f2ce606f 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -406,7 +406,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
finish_wait(&ca->set->bucket_wait, &w);
out:
- wake_up_process(ca->alloc_thread);
+ if (ca->alloc_thread)
+ wake_up_process(ca->alloc_thread);
trace_bcache_alloc(ca, reserve);
@@ -478,7 +479,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
if (b == -1)
goto err;
- k->ptr[i] = PTR(ca->buckets[b].gen,
+ k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
bucket_to_sector(c, b),
ca->sb.nr_this_dev);
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 243de0bf15cd..4bf15182c4da 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -584,7 +584,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
return false;
for (i = 0; i < KEY_PTRS(l); i++)
- if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
return false;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 29eba7219b01..6ed066a0e7c0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -508,7 +508,7 @@ static void journal_reclaim(struct cache_set *c)
continue;
ja->cur_idx = next;
- k->ptr[n++] = PTR(0,
+ k->ptr[n++] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 0ee41fd9d850..1445aab270f4 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -708,7 +708,14 @@ static void cached_dev_read_error(struct closure *cl)
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- if (s->recoverable) {
+ /*
+ * If read request hit dirty data (s->read_dirty_data is true),
+ * then recovery a failed read request from cached device may
+ * get a stale data back. So read failure recovery is only
+ * permitted when read request hit clean data in cache device,
+ * or when cache read race happened.
+ */
+ if (s->recoverable && !s->read_dirty_data) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cdceefd0e57d..2ec7f90e3455 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -928,7 +928,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
buffers = c->minimum_buffers;
*limit_buffers = buffers;
- *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+ *threshold_buffers = mult_frac(buffers,
+ DM_BUFIO_WRITEBACK_PERCENT, 100);
}
/*
@@ -1829,19 +1830,15 @@ static int __init dm_bufio_init(void)
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
- mem = (__u64)((totalram_pages - totalhigh_pages) *
- DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+ mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+ DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
if (mem > ULONG_MAX)
mem = ULONG_MAX;
#ifdef CONFIG_MMU
- /*
- * Get the size of vmalloc space the same way as VMALLOC_TOTAL
- * in fs/proc/internal.h
- */
- if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
- mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+ if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
+ mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif
dm_bufio_default_cache_size = mem;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 47ac131099d9..f7f560f5f056 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -3517,11 +3517,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
- if (test_bit(DMF_FREEING, &md->flags) ||
- dm_deleting_md(md))
- return NULL;
-
+ spin_lock(&_minor_lock);
+ if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
+ }
dm_get(md);
+out:
+ spin_unlock(&_minor_lock);
+
return md;
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index ba3287d176af..f7f2f09cc06b 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -233,6 +233,18 @@ config VIDEO_ADV7481
To compile this driver as a module, choose M here: the
module will be called adv7481.
+config VIDEO_TVTUNER
+ tristate "Analog TV tuner driver"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ Support for the Dummy TV Tuner.
+
+ This is a dummy TV tuner driver used to validate the call flow
+ from the tv_input_test unit-test app.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tv-tuner.
+
config VIDEO_BT819
tristate "BT819A VideoStream decoder"
depends on VIDEO_V4L2 && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index ade6ecaad80d..eec9e870755d 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -39,6 +39,7 @@ ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
endif
obj-$(CONFIG_VIDEO_ADV7481) += adv7481.o
+obj-$(CONFIG_VIDEO_TVTUNER) += tvtuner.o
obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
diff --git a/drivers/media/i2c/adv7481.c b/drivers/media/i2c/adv7481.c
index 74d7b9584827..a14f13c44a36 100644
--- a/drivers/media/i2c/adv7481.c
+++ b/drivers/media/i2c/adv7481.c
@@ -75,6 +75,19 @@ enum adv7481_gpio_t {
ADV7481_GPIO_MAX,
};
+enum adv7481_resolution {
+ RES_1080P = 0,
+ RES_720P,
+ RES_576P_480P,
+ RES_MAX,
+};
+
+struct resolution_config {
+ uint32_t lane_cnt;
+ uint32_t settle_cnt;
+ char resolution[20];
+};
+
struct adv7481_state {
struct device *dev;
@@ -125,6 +138,9 @@ struct adv7481_state {
int csib_src;
int mode;
+ /* resolution configuration */
+ struct resolution_config res_configs[RES_MAX];
+
/* CSI configuration data */
int tx_auto_params;
enum adv7481_mipi_lane tx_lanes;
@@ -241,6 +257,13 @@ const uint8_t adv7481_default_edid_data[] = {
static u32 adv7481_inp_to_ba(u32 adv_input);
static bool adv7481_is_timing_locked(struct adv7481_state *state);
+static int adv7481_get_hdmi_timings(struct adv7481_state *state,
+ struct adv7481_vid_params *vid_params,
+ struct adv7481_hdmi_params *hdmi_params);
+static int get_lane_cnt(struct resolution_config *configs,
+ enum adv7481_resolution size, int w, int h);
+static int get_settle_cnt(struct resolution_config *configs,
+ enum adv7481_resolution size, int w, int h);
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
@@ -1005,11 +1028,18 @@ static long adv7481_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct adv7481_state *state = to_state(sd);
int *ret_val = arg;
+ struct msm_ba_v4l2_ioctl_t adv_arg = *(struct msm_ba_v4l2_ioctl_t *)arg;
long ret = 0;
int param = 0;
+ struct csi_ctrl_params user_csi;
+ struct adv7481_vid_params vid_params;
+ struct adv7481_hdmi_params hdmi_params;
pr_debug("Enter %s with command: 0x%x", __func__, cmd);
+ memset(&vid_params, 0, sizeof(struct adv7481_vid_params));
+ memset(&hdmi_params, 0, sizeof(struct adv7481_hdmi_params));
+
if (!sd)
return -EINVAL;
@@ -1039,6 +1069,28 @@ static long adv7481_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
case VIDIOC_HDMI_RX_CEC_S_ENABLE:
ret = adv7481_cec_powerup(state, arg);
break;
+ case VIDIOC_G_CSI_PARAMS: {
+ if (state->csia_src == ADV7481_IP_HDMI) {
+ ret = adv7481_get_hdmi_timings(state,
+ &vid_params, &hdmi_params);
+ if (ret) {
+ pr_err("%s:Error in adv7481_get_hdmi_timings\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ user_csi.settle_count = get_settle_cnt(state->res_configs,
+ RES_MAX, vid_params.act_pix, vid_params.act_lines);
+ user_csi.lane_count = get_lane_cnt(state->res_configs,
+ RES_MAX, vid_params.act_pix, vid_params.act_lines);
+
+ if (copy_to_user((void __user *)adv_arg.ptr,
+ (void *)&user_csi, sizeof(struct csi_ctrl_params))) {
+ pr_err("%s: Failed to copy CSI params\n", __func__);
+ return -EINVAL;
+ }
+ break;
+ }
default:
pr_err("Not a typewriter! Command: 0x%x", cmd);
ret = -ENOTTY;
@@ -1541,6 +1593,65 @@ static bool adv7481_is_timing_locked(struct adv7481_state *state)
return ret;
}
+static int get_settle_cnt(struct resolution_config *configs,
+ enum adv7481_resolution size, int w, int h)
+{
+ int i;
+ int ret = -EINVAL;
+ char res_type[20] = "RES_MAX";
+
+ if (w == 1920 && h == 1080) {
+ strlcpy(res_type, "RES_1080P", sizeof(res_type));
+ } else if (w == 1280 && h == 720) {
+ strlcpy(res_type, "RES_720P", sizeof(res_type));
+ } else if ((w == 720 && h == 576) || (w == 720 && h == 480)) {
+ strlcpy(res_type, "RES_576P_480P", sizeof(res_type));
+ } else {
+ pr_err("%s: Resolution not supported\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(configs[i].resolution, res_type) == 0) {
+ pr_debug("%s: settle count is set to %d\n",
+ __func__, configs[i].settle_cnt);
+ ret = configs[i].settle_cnt;
+ break;
+ }
+ }
+ return ret;
+}
+
+
+static int get_lane_cnt(struct resolution_config *configs,
+ enum adv7481_resolution size, int w, int h)
+{
+ int i;
+ int ret = -EINVAL;
+ char res_type[20] = "RES_MAX";
+
+ if (w == 1920 && h == 1080) {
+ strlcpy(res_type, "RES_1080P", sizeof(res_type));
+ } else if (w == 1280 && h == 720) {
+ strlcpy(res_type, "RES_720P", sizeof(res_type));
+ } else if ((w == 720 && h == 576) || (w == 720 && h == 480)) {
+ strlcpy(res_type, "RES_576P_480P", sizeof(res_type));
+ } else {
+ pr_err("%s: Resolution not supported\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(configs[i].resolution, res_type) == 0) {
+ pr_debug("%s: lane count is set to %d\n",
+ __func__, configs[i].lane_cnt);
+ ret = configs[i].lane_cnt;
+ break;
+ }
+ }
+ return ret;
+}
+
static int adv7481_get_hdmi_timings(struct adv7481_state *state,
struct adv7481_vid_params *vid_params,
struct adv7481_hdmi_params *hdmi_params)
@@ -2032,12 +2143,30 @@ static int adv7481_csi_powerup(struct adv7481_state *state,
static int adv7481_set_op_stream(struct adv7481_state *state, bool on)
{
int ret = 0;
+ struct adv7481_vid_params vid_params;
+ struct adv7481_hdmi_params hdmi_params;
pr_debug("Enter %s: on: %d, a src: %d, b src: %d\n",
__func__, on, state->csia_src, state->csib_src);
+ memset(&vid_params, 0, sizeof(struct adv7481_vid_params));
+ memset(&hdmi_params, 0, sizeof(struct adv7481_hdmi_params));
+
if (on && state->csia_src != ADV7481_IP_NONE)
- if (ADV7481_IP_HDMI == state->csia_src) {
- state->tx_lanes = ADV7481_MIPI_4LANE;
+ if (state->csia_src == ADV7481_IP_HDMI) {
+ ret = adv7481_get_hdmi_timings(state, &vid_params,
+ &hdmi_params);
+ if (ret) {
+ pr_err("%s: Error %d in adv7481_get_hdmi_timings\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+ state->tx_lanes = get_lane_cnt(state->res_configs,
+ RES_MAX, vid_params.act_pix, vid_params.act_lines);
+
+ if (state->tx_lanes < 0) {
+ pr_err("%s: Invalid lane count\n", __func__);
+ return -EINVAL;
+ }
ret = adv7481_set_audio_spdif(state, on);
ret |= adv7481_csi_powerup(state, ADV7481_OP_CSIA);
} else {
@@ -2245,6 +2374,9 @@ static int adv7481_parse_dt(struct platform_device *pdev,
{
struct device_node *np = state->dev->of_node;
uint32_t i = 0;
+ uint32_t lane_count[RES_MAX];
+ uint32_t settle_count[RES_MAX];
+ static const char *resolution_array[RES_MAX];
int gpio_count = 0;
struct resource *adv_addr_res = NULL;
int ret = 0;
@@ -2258,6 +2390,36 @@ static int adv7481_parse_dt(struct platform_device *pdev,
goto exit;
}
pr_debug("%s: cci_master: 0x%x\n", __func__, state->cci_master);
+ /* read CSI data line */
+ ret = of_property_read_u32_array(np, "tx-lanes",
+ lane_count, RES_MAX);
+ if (ret < 0) {
+ pr_err("%s: failed to read data lane array . ret %d\n",
+ __func__, ret);
+ goto exit;
+ }
+ /* read settle count */
+ ret = of_property_read_u32_array(np, "settle-count",
+ settle_count, RES_MAX);
+ if (ret < 0) {
+ pr_err("%s: failed to read settle count . ret %d\n",
+ __func__, ret);
+ goto exit;
+ }
+ /* read resolution array */
+ ret = of_property_read_string_array(np, "res-array",
+ resolution_array, RES_MAX);
+ if (ret < 0) {
+ pr_err("%s: failed to read resolution array . ret %d\n",
+ __func__, ret);
+ goto exit;
+ }
+ for (i = 0; i < RES_MAX; i++) {
+ state->res_configs[i].lane_cnt = (uint32_t)lane_count[i];
+ state->res_configs[i].settle_cnt = (uint32_t)settle_count[i];
+ strlcpy(state->res_configs[i].resolution, resolution_array[i],
+ sizeof(state->res_configs[i].resolution));
+ }
adv_addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!adv_addr_res) {
pr_err("%s: failed to read adv7481 resource.\n", __func__);
@@ -2449,7 +2611,6 @@ static int adv7481_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int adv7481_suspend(struct device *dev)
{
struct adv7481_state *state;
@@ -2487,10 +2648,6 @@ static int adv7481_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(adv7481_pm_ops, adv7481_suspend, adv7481_resume);
#define ADV7481_PM_OPS (&adv7481_pm_ops)
-#else
-#define ADV7481_PM_OPS NULL
-#endif
-
static struct platform_driver adv7481_driver = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 01adcdc52346..a9e2722f5e22 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2856,6 +2856,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
state->pdata.alt_data_sat = 1;
state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
state->pdata.bus_order = ADV7604_BUS_ORDER_RGB;
+ state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH;
return 0;
}
diff --git a/drivers/media/i2c/tvtuner.c b/drivers/media/i2c/tvtuner.c
new file mode 100644
index 000000000000..357491209814
--- /dev/null
+++ b/drivers/media/i2c/tvtuner.c
@@ -0,0 +1,333 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/media.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+
+#include <media/msm_ba.h>
+
+#include "tvtuner.h"
+
+#define DRIVER_NAME "tv-tuner"
+
+struct Tvtuner_state {
+ struct device *dev;
+
+ /* V4L2 Data */
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct v4l2_dv_timings timings;
+
+ /* media entity controls */
+ struct media_pad pad;
+
+ struct mutex mutex;
+};
+
+
+/* Initialize Tvtuner I2C Settings */
+static int Tvtuner_dev_init(struct Tvtuner_state *state)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner dev init is started\n");
+ return ret;
+}
+
+/* Initialize Tvtuner hardware */
+static int Tvtuner_hw_init(struct Tvtuner_state *state)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner hw init is started\n");
+ return ret;
+}
+
+static int Tvtuner_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner set control is started id = 0x%x\n", ctrl->id);
+ return ret;
+}
+
+static int Tvtuner_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ int ret = 0;
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->width = 1280;
+ fmt->height = 720;
+ fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ TUNER_DEBUG("tv_tuner get mbus format is started\n");
+ return ret;
+}
+
+static int Tvtuner_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner get frame interval is started\n");
+ return ret;
+}
+
+static int Tvtuner_s_routing(struct v4l2_subdev *sd, u32 input,
+ u32 output, u32 config)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner s_routing is started\n");
+ return ret;
+}
+
+static int Tvtuner_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner query dv timings is started\n");
+ return ret;
+}
+
+static int Tvtuner_query_sd_std(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner query SD input is started\n");
+ return ret;
+}
+
+static int Tvtuner_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ int ret = 0;
+ *status = 1;
+
+ TUNER_DEBUG("tv_tuner get input status is started\n");
+ return ret;
+}
+
+static int Tvtuner_s_stream(struct v4l2_subdev *sd, int on)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("tv_tuner start stream is started\n");
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops Tvtuner_video_ops = {
+ .s_routing = Tvtuner_s_routing,
+ .g_frame_interval = Tvtuner_g_frame_interval,
+ .querystd = Tvtuner_query_sd_std,
+ .g_dv_timings = Tvtuner_query_dv_timings,
+ .g_input_status = Tvtuner_g_input_status,
+ .s_stream = Tvtuner_s_stream,
+};
+
+
+static const struct v4l2_ctrl_ops Tvtuner_ctrl_ops = {
+ .s_ctrl = Tvtuner_s_ctrl,
+};
+
+static const struct v4l2_subdev_pad_ops Tvtuner_pad_ops = {
+ .get_fmt = Tvtuner_get_fmt,
+};
+
+static const struct v4l2_subdev_ops Tvtuner_ops = {
+ .video = &Tvtuner_video_ops,
+ .pad = &Tvtuner_pad_ops,
+};
+
+static int Tvtuner_init_v4l2_controls(struct Tvtuner_state *state)
+{
+ int ret = 0;
+
+ TUNER_DEBUG("%s: Exit with ret: %d\n", __func__, ret);
+ return ret;
+}
+
+static int Tvtuner_parse_dt(struct platform_device *pdev,
+ struct Tvtuner_state *state)
+{
+
+ int ret = 0;
+
+ TUNER_DEBUG("%s: tvtuner parse dt called\n", __func__);
+ return ret;
+}
+
+static const struct of_device_id Tvtuner_id[] = {
+ { .compatible = "qcom,tv-tuner", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, Tvtuner_id);
+
+static int Tvtuner_probe(struct platform_device *pdev)
+{
+ struct Tvtuner_state *state;
+ const struct of_device_id *device_id;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ device_id = of_match_device(Tvtuner_id, &pdev->dev);
+ if (!device_id) {
+ TUNER_DEBUG("%s: device_id is NULL\n", __func__);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Create Tvtuner State */
+ state = devm_kzalloc(&pdev->dev,
+ sizeof(struct Tvtuner_state), GFP_KERNEL);
+ if (state == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ platform_set_drvdata(pdev, state);
+ state->dev = &pdev->dev;
+
+ mutex_init(&state->mutex);
+ ret = Tvtuner_parse_dt(pdev, state);
+ if (ret < 0) {
+ TUNER_ERROR("Error parsing dt tree\n");
+ goto err_mem_free;
+ }
+
+ /* Configure and Register V4L2 Sub-device */
+ sd = &state->sd;
+ v4l2_subdev_init(sd, &Tvtuner_ops);
+ sd->owner = pdev->dev.driver->owner;
+ v4l2_set_subdevdata(sd, state);
+ strlcpy(sd->name, DRIVER_NAME, sizeof(sd->name));
+ state->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ state->sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* Register as Media Entity */
+ state->pad.flags = MEDIA_PAD_FL_SOURCE;
+ state->sd.entity.flags |= (unsigned long)MEDIA_ENT_T_V4L2_SUBDEV;
+ ret = media_entity_init(&state->sd.entity, 1, &state->pad, 0);
+ if (ret) {
+ ret = -EIO;
+ TUNER_ERROR("%s(%d): Media entity init failed\n",
+ __func__, __LINE__);
+ goto err_media_entity;
+ }
+
+ /* Initialize HW Config */
+ ret = Tvtuner_hw_init(state);
+ if (ret) {
+ ret = -EIO;
+ TUNER_ERROR("%s: HW Initialisation Failed\n", __func__);
+ goto err_media_entity;
+ }
+
+ ret = Tvtuner_init_v4l2_controls(state);
+ if (ret) {
+ TUNER_ERROR("%s: V4L2 Controls Initialisation Failed %d\n",
+ __func__, ret);
+ }
+
+ /* Initialize SW Init Settings and I2C sub maps */
+ ret = Tvtuner_dev_init(state);
+ if (ret) {
+ ret = -EIO;
+ TUNER_ERROR("%s(%d): SW Initialisation Failed\n",
+ __func__, __LINE__);
+ goto err_media_entity;
+ }
+
+ /* BA registration */
+ TUNER_DEBUG(" register msm-ba driver to tv_tuner");
+ ret = msm_ba_register_subdev_node(sd);
+ if (ret) {
+ ret = -EIO;
+ TUNER_DEBUG("%s: BA init failed\n", __func__);
+ goto err_media_entity;
+ }
+ TUNER_DEBUG("Probe of tvtuner successful!\n");
+
+ return ret;
+
+err_media_entity:
+ media_entity_cleanup(&sd->entity);
+
+err_mem_free:
+ devm_kfree(&pdev->dev, state);
+
+err:
+ return ret;
+}
+
+static int Tvtuner_remove(struct platform_device *pdev)
+{
+ struct Tvtuner_state *state = platform_get_drvdata(pdev);
+
+ msm_ba_unregister_subdev_node(&state->sd);
+ v4l2_device_unregister_subdev(&state->sd);
+ media_entity_cleanup(&state->sd.entity);
+
+ v4l2_ctrl_handler_free(&state->ctrl_hdl);
+
+ mutex_destroy(&state->mutex);
+ devm_kfree(&pdev->dev, state);
+
+ return 0;
+}
+
+static int Tvtuner_suspend(struct device *dev)
+{
+ TUNER_DEBUG("tv_tuner driver is in suspend state\n");
+ return 0;
+}
+
+static int Tvtuner_resume(struct device *dev)
+{
+ TUNER_DEBUG("tv_tuner driver is in resume state\n");
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(Tvtuner_pm_ops, Tvtuner_suspend, Tvtuner_resume);
+#define TVTUNER_PM_OPS (&Tvtuner_pm_ops)
+
+static struct platform_driver Tvtuner_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tv-tuner",
+ .of_match_table = Tvtuner_id,
+ .pm = TVTUNER_PM_OPS,
+ },
+ .probe = Tvtuner_probe,
+ .remove = Tvtuner_remove,
+};
+
+module_driver(Tvtuner_driver, platform_driver_register,
+ platform_driver_unregister);
+
+MODULE_DESCRIPTION(" TV TUNER Test driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/tvtuner.h b/drivers/media/i2c/tvtuner.h
new file mode 100644
index 000000000000..9a3c15df5936
--- /dev/null
+++ b/drivers/media/i2c/tvtuner.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TV_TUNER_H__
+#define __TV_TUNER_H__
+
+#define PREFIX "tv_tuner: "
+
+#define TUNER_DEBUG(str, args...) pr_debug(PREFIX str, ##args)
+#define TUNER_ERROR(str, args...) pr_err(PREFIX str, ##args)
+
+#endif
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index d407244fd1bc..bd0f5b195188 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -680,6 +680,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
/* DST is not a frontend, attaching the ASIC */
if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
pr_err("%s: Could not find a Twinhan DST\n", __func__);
+ kfree(state);
break;
}
/* Attach other DST peripherals if any */
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 49658ca39e51..a851f20dca23 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -815,12 +815,13 @@ static int fimc_is_probe(struct platform_device *pdev)
is->irq = irq_of_parse_and_map(dev->of_node, 0);
if (!is->irq) {
dev_err(dev, "no irq found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
ret = fimc_is_get_clocks(is);
if (ret < 0)
- return ret;
+ goto err_iounmap;
platform_set_drvdata(pdev, is);
@@ -880,6 +881,8 @@ err_irq:
free_irq(is->irq, is);
err_clk:
fimc_is_put_clocks(is);
+err_iounmap:
+ iounmap(is->pmu_regs);
return ret;
}
@@ -935,6 +938,7 @@ static int fimc_is_remove(struct platform_device *pdev)
fimc_is_unregister_subdevs(is);
vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
fimc_is_put_clocks(is);
+ iounmap(is->pmu_regs);
fimc_is_debugfs_remove(is);
release_firmware(is->fw.f_w);
fimc_is_free_cpu_memory(is);
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
index 7f6e78710117..ba64433a3fab 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
@@ -816,9 +816,12 @@ static int msm_jpegdma_s_fmt_vid_out(struct file *file,
static int msm_jpegdma_reqbufs(struct file *file,
void *fh, struct v4l2_requestbuffers *req)
{
+ int ret = 0;
struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req);
+ mutex_lock(&ctx->lock);
+ ret = v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req);
+ mutex_unlock(&ctx->lock);
+ return ret;
}
/*
@@ -925,11 +928,11 @@ static int msm_jpegdma_streamoff(struct file *file,
{
struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
int ret;
-
+ mutex_lock(&ctx->lock);
ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, buf_type);
if (ret < 0)
dev_err(ctx->jdma_device->dev, "Stream off fails\n");
-
+ mutex_unlock(&ctx->lock);
return ret;
}
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 6a969401e950..60532929a916 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -1288,7 +1288,7 @@ static ssize_t write_logsync(struct file *file, const char __user *buf,
uint64_t seq_num = 0;
int ret;
- if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+ if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
return -EFAULT;
ret = sscanf(lbuf, "%llu", &seq_num);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index eb9e7feb9b13..7a16e9ea041c 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2419,6 +2419,11 @@ static int imon_probe(struct usb_interface *interface,
mutex_lock(&driver_lock);
first_if = usb_ifnum_to_if(usbdev, 0);
+ if (!first_if) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
first_if_ctx = usb_get_intfdata(first_if);
if (ifnum == 0) {
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index efc21b1da211..ca107033e429 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -286,11 +286,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (!dev->max_timeout)
return -ENOSYS;
+ /* Check for multiply overflow */
+ if (val > U32_MAX / 1000)
+ return -EINVAL;
+
tmp = val * 1000;
- if (tmp < dev->min_timeout ||
- tmp > dev->max_timeout)
- return -EINVAL;
+ if (tmp < dev->min_timeout || tmp > dev->max_timeout)
+ return -EINVAL;
dev->timeout = tmp;
break;
diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
index 07d08c49f4d4..b2e16bb67572 100644
--- a/drivers/media/usb/as102/as102_fw.c
+++ b/drivers/media/usb/as102/as102_fw.c
@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
unsigned char *cmd,
const struct firmware *firmware) {
- struct as10x_fw_pkt_t fw_pkt;
+ struct as10x_fw_pkt_t *fw_pkt;
int total_read_bytes = 0, errno = 0;
unsigned char addr_has_changed = 0;
+ fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
+ if (!fw_pkt)
+ return -ENOMEM;
+
+
for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
int read_bytes = 0, data_len = 0;
/* parse intel hex line */
read_bytes = parse_hex_line(
(u8 *) (firmware->data + total_read_bytes),
- fw_pkt.raw.address,
- fw_pkt.raw.data,
+ fw_pkt->raw.address,
+ fw_pkt->raw.data,
&data_len,
&addr_has_changed);
@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
/* detect the end of file */
total_read_bytes += read_bytes;
if (total_read_bytes == firmware->size) {
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x03;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x03;
/* send EOF command */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt, 2, 0);
+ fw_pkt, 2, 0);
if (errno < 0)
goto error;
} else {
if (!addr_has_changed) {
/* prepare command to send */
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x01;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x01;
- data_len += sizeof(fw_pkt.u.request);
- data_len += sizeof(fw_pkt.raw.address);
+ data_len += sizeof(fw_pkt->u.request);
+ data_len += sizeof(fw_pkt->raw.address);
/* send cmd to device */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt,
+ fw_pkt,
data_len,
0);
if (errno < 0)
@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
}
}
error:
+ kfree(fw_pkt);
return (errno == 0) ? total_read_bytes : errno;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 2c5f76d588ac..04ae21278440 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1672,7 +1672,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
nr = dev->devno;
assoc_desc = udev->actconfig->intf_assoc[0];
- if (assoc_desc->bFirstInterface != ifnum) {
+ if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
dev_err(d, "Not found matching IAD interface\n");
retval = -ENODEV;
goto err_if;
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 19b0293312a0..07670117f922 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -356,7 +356,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
*/
if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
(ven_req->bRequest == 0x5) ||
- (ven_req->bRequest == 0x6))) {
+ (ven_req->bRequest == 0x6) ||
+
+ /* Internal Master 3 Bus can send
+ * and receive only 4 bytes per time
+ */
+ (ven_req->bRequest == 0x2))) {
unsend_size = 0;
pdata = ven_req->pBuff;
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 7ed49646a699..7df0707a0455 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -292,7 +292,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -326,7 +326,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -479,7 +479,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap)
&stk7700ph_dib7700_xc3028_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7070p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7770p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3036,7 +3036,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
@@ -3089,7 +3089,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
/* initialize IC 0 */
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3119,7 +3119,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3194,7 +3194,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap)
1, 0x10, &tfe7790p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
@@ -3289,7 +3289,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3364,7 +3364,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -3600,7 +3600,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
/* Demodulator not found for some reason? */
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index bc45a225e710..3feaa9b154f0 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1205,6 +1205,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
+static u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+ u32 flags = ctrl->flags;
+
+ if (ctrl->is_ptr)
+ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+ return flags;
+}
+
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
{
memset(ev->reserved, 0, sizeof(ev->reserved));
@@ -1212,7 +1222,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
ev->id = ctrl->id;
ev->u.ctrl.changes = changes;
ev->u.ctrl.type = ctrl->type;
- ev->u.ctrl.flags = ctrl->flags;
+ ev->u.ctrl.flags = user_flags(ctrl);
if (ctrl->is_ptr)
ev->u.ctrl.value64 = 0;
else
@@ -2541,10 +2551,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
else
qc->id = ctrl->id;
strlcpy(qc->name, ctrl->name, sizeof(qc->name));
- qc->flags = ctrl->flags;
+ qc->flags = user_flags(ctrl);
qc->type = ctrl->type;
- if (ctrl->is_ptr)
- qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
qc->elem_size = ctrl->elem_size;
qc->elems = ctrl->elems;
qc->nr_of_dims = ctrl->nr_of_dims;
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 0d1825696153..405ce78c1ef4 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -99,7 +99,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
u8 bank;
if (sysctrl_dev == NULL)
- return -EINVAL;
+ return -EPROBE_DEFER;
bank = (reg >> 8);
if (!valid_bank(bank))
@@ -115,11 +115,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
u8 bank;
if (sysctrl_dev == NULL)
- return -EINVAL;
+ return -EPROBE_DEFER;
bank = (reg >> 8);
- if (!valid_bank(bank))
+ if (!valid_bank(bank)) {
+ pr_err("invalid bank\n");
return -EINVAL;
+ }
return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), mask, value);
@@ -180,9 +182,15 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_sysctrl_match[] = {
+ { .compatible = "stericsson,ab8500-sysctrl", },
+ {}
+};
+
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
+ .of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
.remove = ab8500_sysctrl_remove,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 9842199e2e6c..89a2dd4d212a 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -164,14 +164,14 @@ static struct resource axp22x_pek_resources[] = {
static struct resource axp288_power_button_resources[] = {
{
.name = "PEK_DBR",
- .start = AXP288_IRQ_POKN,
- .end = AXP288_IRQ_POKN,
+ .start = AXP288_IRQ_POKP,
+ .end = AXP288_IRQ_POKP,
.flags = IORESOURCE_IRQ,
},
{
.name = "PEK_DBF",
- .start = AXP288_IRQ_POKP,
- .end = AXP288_IRQ_POKP,
+ .start = AXP288_IRQ_POKN,
+ .end = AXP288_IRQ_POKN,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 5d7c0900fa1b..f112c5bc082a 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -257,6 +257,9 @@ static ssize_t at24_read(struct at24_data *at24,
if (unlikely(!count))
return count;
+ if (off + count > at24->chip.byte_len)
+ return -EINVAL;
+
/*
* Read data from chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -311,6 +314,9 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
unsigned long timeout, write_time;
unsigned next_page;
+ if (offset + count > at24->chip.byte_len)
+ return -EINVAL;
+
/* Get corresponding I2C address and adjust offset */
client = at24_translate_offset(at24, &offset);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 2ff39fbc70d1..df268365e04e 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1300,6 +1300,9 @@ int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
return -EOPNOTSUPP;
}
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 0c7cbfb862b9..98657d0a6822 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1729,7 +1729,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
if (ptr_svc->svc.listener_id != lstnr) {
pr_warn("Service requested does not exist\n");
__qseecom_qseos_fail_return_resp_tz(data, resp,
- &send_data_rsp, ptr_svc, lstnr);
+ &send_data_rsp, NULL, lstnr);
return -ERESTARTSYS;
}
pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
@@ -1940,7 +1940,7 @@ exit:
}
static int __qseecom_process_blocked_on_listener_smcinvoke(
- struct qseecom_command_scm_resp *resp)
+ struct qseecom_command_scm_resp *resp, uint32_t app_id)
{
struct qseecom_registered_listener_list *list_ptr;
int ret = 0;
@@ -1987,9 +1987,18 @@ static int __qseecom_process_blocked_on_listener_smcinvoke(
&ireq, sizeof(ireq),
&continue_resp, sizeof(continue_resp));
if (ret) {
- pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
- session_id, ret);
- goto exit;
+ /* retry with legacy cmd */
+ qseecom.smcinvoke_support = false;
+ ireq.app_or_session_id = app_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ qseecom.smcinvoke_support = true;
+ if (ret) {
+ pr_err("cont block req for app %d or session %d fail\n",
+ app_id, session_id);
+ goto exit;
+ }
}
resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
@@ -2006,7 +2015,7 @@ static int __qseecom_process_reentrancy_blocked_on_listener(
resp, ptr_app, data);
else
return __qseecom_process_blocked_on_listener_smcinvoke(
- resp);
+ resp, data->client.app_id);
}
static int __qseecom_reentrancy_process_incomplete_cmd(
struct qseecom_dev_handle *data,
@@ -4786,9 +4795,16 @@ int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
resp.data = desc->ret[2]; /*listener_id*/
+ dummy_private_data.client.app_id = desc->ret[1];
+ dummy_app_entry.app_id = desc->ret[1];
+
mutex_lock(&app_access_lock);
- ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+ if (qseecom.qsee_reentrancy_support)
+ ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
&dummy_private_data);
+ else
+ ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
+ &resp);
mutex_unlock(&app_access_lock);
if (ret)
pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ae54302be8fd..88699f852aa2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -52,9 +52,28 @@ static void mmc_host_classdev_release(struct device *dev)
kfree(host);
}
+static int mmc_host_prepare(struct device *dev)
+{
+ /*
+ * Since mmc_host is a virtual device, we don't have to do anything.
+ * If we return a positive value, the pm framework will consider that
+ * the runtime suspend and system suspend of this device is same and
+ * will set direct_complete flag as true. We don't want this as the
+ * mmc_host always has positive disable_depth and setting the flag
+ * will not speed up the suspend process.
+ * So return 0.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops mmc_pm_ops = {
+ .prepare = mmc_host_prepare,
+};
+
static struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
+ .pm = &mmc_pm_ops,
};
int mmc_register_host_class(void)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 6291d5042ef2..6fed41bd016a 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 54ab48827258..7ba109e8cf88 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2663,15 +2663,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd->priv;
+ int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
- /* Wait for the device to get ready */
- panic_nand_wait(mtd, chip, 400);
-
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
+ chip->select_chip(mtd, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index e90c6a7333d7..2e4649655181 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static int ipddp_create(struct ipddp_route *new_rt)
{
- struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+ struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (rt == NULL)
return -ENOMEM;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5dca77e0ffed..2cb34b0f3856 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3166,7 +3166,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash;
+ return hash >> 1;
}
/*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index cf7c18947189..d065c0e2d18e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e36d10520e24..717530eac70c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;
diff --git a/drivers/net/can/spi/k61.c b/drivers/net/can/spi/k61.c
index 9ce0ad854caa..84c13a1c04a5 100644
--- a/drivers/net/can/spi/k61.c
+++ b/drivers/net/can/spi/k61.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/uaccess.h>
+#include <linux/pm.h>
#define DEBUG_K61 0
#if DEBUG_K61 == 1
@@ -921,11 +922,39 @@ static const struct of_device_id k61_match_table[] = {
{ }
};
+#ifdef CONFIG_PM
+static int k61_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ enable_irq_wake(spi->irq);
+ return 0;
+}
+
+static int k61_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct k61_can *priv_data = spi_get_drvdata(spi);
+
+ disable_irq_wake(spi->irq);
+ k61_rx_message(priv_data);
+ return 0;
+}
+
+static const struct dev_pm_ops k61_dev_pm_ops = {
+ .suspend = k61_suspend,
+ .resume = k61_resume,
+};
+#endif
+
static struct spi_driver k61_driver = {
.driver = {
.name = "k61",
.of_match_table = k61_match_table,
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &k61_dev_pm_ops,
+#endif
},
.probe = k61_probe,
.remove = k61_remove,
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4cd821..1ac2090a1721 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
/* enter the selected mode */
mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
- if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
}
stats->rx_over_errors++;
stats->rx_errors++;
+
+ /* reset the CAN IP by entering reset mode
+ * ignoring timeout error
+ */
+ set_reset_mode(dev);
+ set_normal_mode(dev);
+
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
- if (isrc & SUN4I_INT_RBUF_VLD) {
- /* receive interrupt */
+ if ((isrc & SUN4I_INT_RBUF_VLD) &&
+ !(isrc & SUN4I_INT_DATA_OR)) {
+ /* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);
@@ -811,7 +819,6 @@ static int sun4ican_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK |
- CAN_CTRLMODE_PRESUME_ACK |
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 022bfa13ebfa..c2e2821a3346 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -134,6 +134,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_LEAF_USB_THROTTLE 77
#define CMD_LEAF_LOG_MESSAGE 106
@@ -1297,6 +1298,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
goto warn;
break;
+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->family != KVASER_LEAF)
+ goto warn;
+ break;
+
default:
warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
@@ -1607,7 +1613,8 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+ if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n", err);
err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 8f8418d2ac4a..a0012c3cb4f6 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2366,9 +2366,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* 4) Get the hardware address.
* 5) Put the card to sleep.
*/
- if (typhoon_reset(ioaddr, WaitSleep) < 0) {
+ err = typhoon_reset(ioaddr, WaitSleep);
+ if (err < 0) {
err_msg = "could not reset 3XP";
- err = -EIO;
goto error_out_dma;
}
@@ -2382,24 +2382,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
typhoon_init_interface(tp);
typhoon_init_rings(tp);
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
+ if (err < 0) {
err_msg = "cannot boot 3XP sleep image";
- err = -EIO;
goto error_out_reset;
}
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
- if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
+ if (err < 0) {
err_msg = "cannot read MAC address";
- err = -EIO;
goto error_out_reset;
}
*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
- if(!is_valid_ether_addr(dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
+ err = -EIO;
goto error_out_reset;
}
@@ -2407,7 +2408,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* later when we print out the version reported.
*/
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
- if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
+ if (err < 0) {
err_msg = "Could not get Sleep Image version";
goto error_out_reset;
}
@@ -2424,9 +2426,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if(xp_resp[0].numDesc != 0)
tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
+ err = typhoon_sleep(tp, PCI_D3hot, 0);
+ if (err < 0) {
err_msg = "cannot put adapter to sleep";
- err = -EIO;
goto error_out_reset;
}
@@ -2449,7 +2451,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features = dev->hw_features |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
- if(register_netdev(dev) < 0) {
+ err = register_netdev(dev);
+ if (err < 0) {
err_msg = "unable to register netdev";
goto error_out_reset;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 8860e74aa28f..027705117086 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1045,15 +1045,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
goto out;
}
- /* Insert TSB and checksum infos */
- if (priv->tsb_en) {
- skb = bcm_sysport_insert_tsb(skb, dev);
- if (!skb) {
- ret = NETDEV_TX_OK;
- goto out;
- }
- }
-
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
@@ -1061,13 +1052,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*/
- if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
ret = NETDEV_TX_OK;
goto out;
}
- skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
- ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+ /* Insert TSB and checksum infos */
+ if (priv->tsb_en) {
+ skb = bcm_sysport_insert_tsb(skb, dev);
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ skb_len = skb->len;
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
if (dma_mapping_error(kdev, mapping)) {
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b1b9ebafb354..a3b2e23921bf 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
RXFSD = 0x00000800, /* first descriptor */
RXLSD = 0x00000400, /* last descriptor */
ErrorSummary = 0x80, /* error summary */
- RUNT = 0x40, /* runt packet received */
- LONG = 0x20, /* long packet received */
+ RUNTPKT = 0x40, /* runt packet received */
+ LONGPKT = 0x20, /* long packet received */
FAE = 0x10, /* frame align error */
CRC = 0x08, /* crc error */
RXER = 0x04, /* receive error */
@@ -1633,7 +1633,7 @@ static int netdev_rx(struct net_device *dev)
dev->name, rx_status);
dev->stats.rx_errors++; /* end of a packet. */
- if (rx_status & (LONG | RUNT))
+ if (rx_status & (LONGPKT | RUNTPKT))
dev->stats.rx_length_errors++;
if (rx_status & RXER)
dev->stats.rx_frame_errors++;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ab716042bdd2..458e2d97d096 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2968,6 +2968,7 @@ static void set_multicast_list(struct net_device *ndev)
struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
+ unsigned int hash_high = 0, hash_low = 0;
if (ndev->flags & IFF_PROMISC) {
tmp = readl(fep->hwp + FEC_R_CNTRL);
@@ -2990,11 +2991,7 @@ static void set_multicast_list(struct net_device *ndev)
return;
}
- /* Clear filter and add the addresses in hash register
- */
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-
+ /* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
crc = 0xffffffff;
@@ -3012,16 +3009,14 @@ static void set_multicast_list(struct net_device *ndev)
*/
hash = (crc >> (32 - HASH_BITS)) & 0x3f;
- if (hash > 31) {
- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- tmp |= 1 << (hash - 32);
- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- } else {
- tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- tmp |= 1 << hash;
- writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- }
+ if (hash > 31)
+ hash_high |= 1 << (hash - 32);
+ else
+ hash_low |= 1 << hash;
}
+
+ writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
/* Set a MAC change in hardware. */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e59d7c283cd4..645ace74429e 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
* Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ * up).
**/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
@@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 0;
+ return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val)
+ if (ret_val) {
e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
- return ret_val;
+ return 1;
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 80ec587d510e..5205f1ebe381 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5017,7 +5017,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
case e1000_media_type_copper:
if (hw->mac.get_link_status) {
ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
+ link_active = ret_val > 0;
} else {
link_active = true;
}
@@ -5035,7 +5035,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
break;
}
- if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+ if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
(er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
e_info("Gigabit has been disabled, downgrading speed\n");
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index de13aeacae97..8e674a0988b0 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
s32 ret_val = 0;
u16 i, phy_status;
+ *success = false;
for (i = 0; i < iterations; i++) {
/* Some PHYs require the MII_BMSR register to be read
* twice due to the link bit being sticky. No harm doing
@@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & BMSR_LSTATUS)
+ if (phy_status & BMSR_LSTATUS) {
+ *success = true;
break;
+ }
if (usec_interval >= 1000)
msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
- *success = (i < iterations);
-
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 09281558bfbc..c21fa56afd7c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1226,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index af09a1b272e6..6a2d1454befe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -2002,9 +2002,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
* function can also be used to respond to an error as the connection
* resetting would also be a means of dealing with errors.
**/
-static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
- struct fm10k_mbx_info *mbx)
+static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
{
+ s32 err = 0;
const enum fm10k_mbx_state state = mbx->state;
switch (state) {
@@ -2017,6 +2018,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
case FM10K_STATE_OPEN:
/* flush any incomplete work */
fm10k_sm_mbx_connect_reset(mbx);
+ err = FM10K_ERR_RESET_REQUESTED;
break;
case FM10K_STATE_CONNECT:
/* Update remote value to match local value */
@@ -2026,6 +2028,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
}
fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+ return err;
}
/**
@@ -2106,7 +2110,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
case 0:
- fm10k_sm_mbx_process_reset(hw, mbx);
+ err = fm10k_sm_mbx_process_reset(hw, mbx);
break;
case FM10K_SM_MBX_VERSION:
err = fm10k_sm_mbx_process_version_1(hw, mbx);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f3fb51bc37b..06f35700840b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1072,6 +1072,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
struct fm10k_hw *hw = &interface->hw;
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 eicr;
+ s32 err = 0;
/* unmask any set bits related to this interrupt */
eicr = fm10k_read_reg(hw, FM10K_EICR);
@@ -1087,12 +1088,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* service mailboxes */
if (fm10k_mbx_trylock(interface)) {
- mbx->ops.process(hw, mbx);
+ err = mbx->ops.process(hw, mbx);
/* handle VFLRE events */
fm10k_iov_event(interface);
fm10k_mbx_unlock(interface);
}
+ if (err == FM10K_ERR_RESET_REQUESTED)
+ interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+
/* if switch toggled state we should reset GLORTs */
if (eicr & FM10K_EICR_SWITCHNOTREADY) {
/* force link down for at least 4 seconds */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4edbab6ca7ef..b5b228c9a030 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3595,7 +3595,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 26c55bba4bf3..6dcc3854844d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -663,7 +663,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 39db70a597ed..1ed27fcd5031 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -172,7 +172,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 97bf0c3d5c69..f3f3b95d5512 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -223,6 +223,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
+ /* Make sure the PHY is in a good state. Several people have reported
+ * firmware leaving the PHY's page select register set to something
+ * other than the default of zero, which causes the PHY ID read to
+ * access something other than the intended register.
+ */
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 29f59c76878a..851225b5dc0f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
ret_val = igb_pool_flash_update_done_i210(hw);
if (ret_val)
- hw_dbg("Flash update complete\n");
- else
hw_dbg("Flash update time out\n");
+ else
+ hw_dbg("Flash update complete\n");
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a481ea64e287..c55552c3d2f9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3172,7 +3172,9 @@ static int __igb_close(struct net_device *netdev, bool suspending)
static int igb_close(struct net_device *netdev)
{
- return __igb_close(netdev, false);
+ if (netif_device_present(netdev))
+ return __igb_close(netdev, false);
+ return 0;
}
/**
@@ -6431,7 +6433,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
@@ -7325,12 +7327,14 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
if (netif_running(netdev))
__igb_close(netdev, true);
igb_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -7450,16 +7454,15 @@ static int igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- if (netdev->flags & IFF_UP) {
- rtnl_lock();
+ rtnl_lock();
+ if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
- rtnl_unlock();
- if (err)
- return err;
- }
- netif_device_attach(netdev);
- return 0;
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return err;
}
static int igb_runtime_idle(struct device *dev)
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 297af801f051..519b72c41888 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -809,7 +809,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index f3168bcc7d87..f0de09db8283 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -307,6 +307,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_rss(adapter);
}
+#define IXGBE_RSS_64Q_MASK 0x3F
#define IXGBE_RSS_16Q_MASK 0xF
#define IXGBE_RSS_8Q_MASK 0x7
#define IXGBE_RSS_4Q_MASK 0x3
@@ -602,6 +603,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
**/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_ring_feature *f;
u16 rss_i;
@@ -610,7 +612,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
rss_i = f->limit;
f->indices = rss_i;
- f->mask = IXGBE_RSS_16Q_MASK;
+
+ if (hw->mac.type < ixgbe_mac_X550)
+ f->mask = IXGBE_RSS_16Q_MASK;
+ else
+ f->mask = IXGBE_RSS_64Q_MASK;
/* disable ATR by default, it will be configured below */
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cd9b284bc83b..a5b443171b8b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1114,7 +1114,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -5878,7 +5878,8 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_close_suspend(adapter);
+ if (netif_device_present(netdev))
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
@@ -5923,14 +5924,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
- rtnl_unlock();
-
- if (err)
- return err;
- netif_device_attach(netdev);
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
- return 0;
+ return err;
}
#endif /* CONFIG_PM */
@@ -5945,14 +5944,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
- rtnl_lock();
if (netif_running(netdev))
ixgbe_close_suspend(adapter);
- rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -9221,7 +9220,7 @@ skip_bad_vf_detection:
}
if (netif_running(netdev))
- ixgbe_down(adapter);
+ ixgbe_close_suspend(adapter);
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
pci_disable_device(pdev);
@@ -9291,10 +9290,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
}
#endif
+ rtnl_lock();
if (netif_running(netdev))
- ixgbe_up(adapter);
+ ixgbe_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
}
static const struct pci_error_handlers ixgbe_err_handler = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index fb8673d63806..48d97cb730d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -113,7 +113,7 @@ static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 10;
+ int max_retry = 3;
int retry = 0;
u8 csum_byte;
u8 high_bits;
@@ -1764,6 +1764,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ max_retry = 3;
if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
max_retry = IXGBE_SFP_DETECT_RETRIES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index ebe0ac950b14..31f864fb30c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1643,8 +1643,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
return status;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 592ff237d692..50bbad37d640 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -312,7 +312,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 585e90f8341d..f735dfcb64ae 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -831,14 +831,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
- netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
- }
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
- }
out:
return budget - quota;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 79de9608ac48..ed96fdefd8e5 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1117,6 +1117,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
case TUNSETSNDBUF:
if (get_user(s, sp))
return -EFAULT;
+ if (s <= 0)
+ return -EINVAL;
q->sk.sk_sndbuf = s;
return 0;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e5bb870b5461..dc454138d600 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1110,7 +1110,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
static struct lock_class_key ppp_tx_busylock;
static int ppp_dev_init(struct net_device *dev)
{
+ struct ppp *ppp;
+
dev->qdisc_tx_busylock = &ppp_tx_busylock;
+
+ ppp = netdev_priv(dev);
+ /* Let the netdevice take a reference on the ppp file. This ensures
+ * that ppp_destroy_interface() won't run before the device gets
+ * unregistered.
+ */
+ atomic_inc(&ppp->file.refcnt);
+
return 0;
}
@@ -1133,6 +1143,15 @@ static void ppp_dev_uninit(struct net_device *dev)
wake_up_interruptible(&ppp->file.rwait);
}
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ ppp = netdev_priv(dev);
+ if (atomic_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
@@ -1150,6 +1169,7 @@ static void ppp_setup(struct net_device *dev)
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->destructor = ppp_dev_priv_destructor;
netif_keep_dst(dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 89ad2b750531..1b0184b3818a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1685,6 +1685,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
+ err = dev_get_valid_name(net, dev, name);
+ if (err < 0)
+ goto err_free_dev;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
@@ -2072,6 +2075,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
break;
}
+ if (sndbuf <= 0) {
+ ret = -EINVAL;
+ break;
+ }
tun->sndbuf = sndbuf;
tun_set_sndbuf(tun);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8c408aa2f208..f9343bee1de3 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -221,7 +221,7 @@ skip:
goto bad_desc;
}
- if (header.usb_cdc_ether_desc) {
+ if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
/* because of Zaurus, we may be ignoring the host
* side link address we were given.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e0e94b855bbe..1228d0da4075 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -724,8 +724,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
+ int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
+ u16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -823,6 +825,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
+ /*
+ * Some Huawei devices have been observed to come out of reset in NDP32 mode.
+ * Let's check if this is the case, and set the device to NDP16 mode again if
+ * needed.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &curr_ntb_format, 2);
+ if (err < 0) {
+ goto error2;
+ }
+
+ if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+ dev_info(&intf->dev, "resetting NTB format to 16-bit");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+
+ if (err < 0)
+ goto error2;
+ }
+ }
+
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65cd5e4..63f28908afda 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
* be at the end of the frame.
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+
+ /* Additionally, it has been reported that some Huawei E3372H devices, with
+ * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
+ * needing to be set to the NTB16 one again.
+ */
+ drvflags |= CDC_NCM_FLAG_RESET_NTB16;
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 958af3b1af7f..e325ca3ad565 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -262,7 +262,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* errors aren't fatal - we can live with the dynamic address */
- if (cdc_ether) {
+ if (cdc_ether && cdc_ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 55033aed6d6b..079d77678b1c 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -706,8 +706,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
"boot get otp board id result 0x%08x board_id %d chip_id %d\n",
result, board_id, chip_id);
- if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+ if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
+ (board_id == 0)) {
+ ath10k_warn(ar, "board id is not exist in otp, ignore it\n");
return -EOPNOTSUPP;
+ }
ar->id.bmi_ids_valid = true;
ar->id.bmi_board_id = board_id;
@@ -2088,7 +2091,7 @@ void ath10k_core_stop(struct ath10k *ar)
/* try to suspend target */
if (ar->state != ATH10K_STATE_RESTARTING &&
ar->state != ATH10K_STATE_UTF)
- ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+ ath10k_wait_for_suspend(ar, ar->hw_values->pdev_suspend_option);
ath10k_hif_stop(ar);
ath10k_htt_tx_free(&ar->htt);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index f194d434b97c..2ef2e1ec040a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -433,6 +433,7 @@ struct ath10k_vif {
struct cfg80211_bitrate_mask bitrate_mask;
struct wmi_ns_arp_offload_req arp_offload;
struct wmi_ns_arp_offload_req ns_offload;
+ struct wmi_gtk_rekey_data gtk_rekey_data;
};
struct ath10k_vif_iter {
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index caf63b8bbba4..1437b5d29a17 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -460,6 +460,7 @@ struct ath10k_hw_ce_regs qcax_ce_regs = {
};
const struct ath10k_hw_values qca988x_values = {
+ .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
.rtc_state_val_on = 3,
.ce_count = 8,
.msi_assign_ce_max = 7,
@@ -469,6 +470,7 @@ const struct ath10k_hw_values qca988x_values = {
};
const struct ath10k_hw_values qca6174_values = {
+ .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
.rtc_state_val_on = 3,
.ce_count = 8,
.msi_assign_ce_max = 7,
@@ -478,6 +480,7 @@ const struct ath10k_hw_values qca6174_values = {
};
const struct ath10k_hw_values qca99x0_values = {
+ .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
.rtc_state_val_on = 5,
.ce_count = 12,
.msi_assign_ce_max = 12,
@@ -487,6 +490,7 @@ const struct ath10k_hw_values qca99x0_values = {
};
const struct ath10k_hw_values qca9888_values = {
+ .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
.rtc_state_val_on = 3,
.ce_count = 12,
.msi_assign_ce_max = 12,
@@ -496,13 +500,15 @@ const struct ath10k_hw_values qca9888_values = {
};
const struct ath10k_hw_values qca4019_values = {
- .ce_count = 12,
- .num_target_ce_config_wlan = 10,
- .ce_desc_meta_data_mask = 0xFFF0,
- .ce_desc_meta_data_lsb = 4,
+ .pdev_suspend_option = WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+ .ce_count = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
};
const struct ath10k_hw_values wcn3990_values = {
+ .pdev_suspend_option = WMI_PDEV_SUSPEND,
.rtc_state_val_on = 5,
.ce_count = 12,
.msi_assign_ce_max = 12,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 8aa696ed2e72..a37b956c558f 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -363,6 +363,7 @@ extern struct ath10k_hw_ce_regs qcax_ce_regs;
extern struct fw_flag wcn3990_fw_flags;
struct ath10k_hw_values {
+ u32 pdev_suspend_option;
u32 rtc_state_val_on;
u8 ce_count;
u8 msi_assign_ce_max;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 9fcc2866d830..28042100ae0a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1237,6 +1237,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar)
return ath10k_monitor_stop(ar);
}
+static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!arvif->is_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->protection_mode;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
+ arvif->vdev_id, arvif->use_cts_prot);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->use_cts_prot ? 1 : 0);
+}
+
static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
@@ -5386,20 +5416,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
arvif->use_cts_prot = info->use_cts_prot;
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
- arvif->vdev_id, info->use_cts_prot);
ret = ath10k_recalc_rtscts_prot(arvif);
if (ret)
ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
- vdev_param = ar->wmi.vdev_param->protection_mode;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- info->use_cts_prot ? 1 : 0);
- if (ret)
- ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
- info->use_cts_prot, arvif->vdev_id, ret);
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -7463,6 +7491,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_up = true;
}
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7583,6 +7618,7 @@ static const struct ieee80211_ops ath10k_ops = {
.suspend = ath10k_wow_op_suspend,
.resume = ath10k_wow_op_resume,
.set_wakeup = ath10k_wow_op_set_wakeup,
+ .set_rekey_data = ath10k_wow_op_set_rekey_data,
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index dcb0da51530a..cf738efd45c5 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -162,6 +162,8 @@ struct wmi_ops {
const struct wmi_sta_keepalive_arg *arg);
struct sk_buff *(*gen_set_arp_ns_offload)(struct ath10k *ar,
struct ath10k_vif *arvif);
+ struct sk_buff *(*gen_gtk_offload)(struct ath10k *ar,
+ struct ath10k_vif *arvif);
struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
@@ -1206,6 +1208,23 @@ ath10k_wmi_set_arp_ns_offload(struct ath10k *ar, struct ath10k_vif *arvif)
}
static inline int
+ath10k_wmi_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_gtk_offload)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gtk_offload(ar, arvif);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->gtk_offload_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
struct sk_buff *skb;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index ad27abc61deb..ba411cba6fc9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1156,8 +1156,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
struct ath10k_fw_stats_pdev *dst;
src = data;
- if (data_len < sizeof(*src))
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
return -EPROTO;
+ }
data += sizeof(*src);
data_len -= sizeof(*src);
@@ -1177,8 +1179,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
struct ath10k_fw_stats_vdev *dst;
src = data;
- if (data_len < sizeof(*src))
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
return -EPROTO;
+ }
data += sizeof(*src);
data_len -= sizeof(*src);
@@ -1196,8 +1200,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
struct ath10k_fw_stats_peer *dst;
src = data;
- if (data_len < sizeof(*src))
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
return -EPROTO;
+ }
data += sizeof(*src);
data_len -= sizeof(*src);
@@ -3015,6 +3021,40 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
}
static struct sk_buff *
+ath10k_wmi_op_gen_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif)
+{
+ struct wmi_tlv_gtk_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ struct wmi_gtk_rekey_data *rekey_data = &arvif->gtk_rekey_data;
+ int len;
+
+ len = sizeof(*cmd) + sizeof(*tlv);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ if (rekey_data->enable_offload) {
+ cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+ cmd->flags |= __cpu_to_le32(WMI_GTK_OFFLOAD_ENABLE_OPCODE);
+ memcpy(cmd->kek, rekey_data->kek, NL80211_KEK_LEN);
+ memcpy(cmd->kck, rekey_data->kck, NL80211_KCK_LEN);
+ cmd->replay_ctr = __cpu_to_le64(rekey_data->replay_ctr);
+ } else {
+ cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+ cmd->flags |= __cpu_to_le32(WMI_GTK_OFFLOAD_DISABLE_OPCODE);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi GTK offload for vdev: %d\n", arvif->vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar,
struct ath10k_vif *arvif)
{
@@ -3778,6 +3818,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
.gen_set_arp_ns_offload = ath10k_wmi_tlv_op_gen_set_arp_ns_offload,
+ .gen_gtk_offload = ath10k_wmi_op_gen_gtk_offload,
.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 39951d273244..18327daade8d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1567,6 +1567,14 @@ struct wmi_tlv_arp_ns_offload_cmd {
__le32 num_ns_ext_tuples;
} __packed;
+struct wmi_tlv_gtk_offload_cmd {
+ __le32 vdev_id;
+ __le32 flags;
+ u8 kek[NL80211_KEK_LEN];
+ u8 kck[NL80211_KCK_LEN];
+ __le64 replay_ctr;
+} __packed;
+
struct wmi_tlv_wow_host_wakeup_ind {
__le32 reserved;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 44c237eec157..2694b6aa8b77 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -2933,6 +2933,20 @@ struct wmi_arp_offload {
struct wmi_mac_addr target_mac;
} __packed;
+/* GTK offload data structure */
+#define WMI_GTK_OFFLOAD_ENABLE_OPCODE BIT(24)
+#define WMI_GTK_OFFLOAD_DISABLE_OPCODE BIT(25)
+#define WMI_GTK_OFFLOAD_ENABLE 1
+#define WMI_GTK_OFFLOAD_DISABLE 0
+
+struct wmi_gtk_rekey_data {
+ bool valid;
+ bool enable_offload;
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+} __packed;
+
struct wmi_start_scan_tlvs {
/* TLV parameters. These includes channel list, ssid list, bssid list,
* extra ies.
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 429fcce3dd1e..6bbcf8b79d9a 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -320,6 +320,56 @@ static int ath10k_config_wow_listen_interval(struct ath10k *ar)
return 0;
}
+void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ mutex_lock(&ar->conf_mutex);
+ memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ arvif->gtk_rekey_data.replay_ctr =
+ __cpu_to_le64(*(__le64 *)data->replay_ctr);
+ arvif->gtk_rekey_data.valid = true;
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload)
+{
+ struct ath10k_vif *arvif;
+ struct ieee80211_bss_conf *bss;
+ struct wmi_gtk_rekey_data *rekey_data;
+ int ret;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ bss = &arvif->vif->bss_conf;
+ if (!arvif->is_up || !bss->assoc)
+ continue;
+
+ rekey_data = &arvif->gtk_rekey_data;
+ if (!rekey_data->valid)
+ continue;
+
+ if (gtk_offload)
+ rekey_data->enable_offload = WMI_GTK_OFFLOAD_ENABLE;
+ else
+ rekey_data->enable_offload = WMI_GTK_OFFLOAD_DISABLE;
+ ret = ath10k_wmi_gtk_offload(ar, arvif);
+ if (ret) {
+ ath10k_err(ar, "GTK offload failed for vdev_id: %d\n",
+ arvif->vdev_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
@@ -334,10 +384,16 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto exit;
}
+ ret = ath10k_wow_config_gtk_offload(ar, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret);
+ goto exit;
+ }
+
ret = ath10k_wow_enable_ns_arp_offload(ar, true);
if (ret) {
ath10k_warn(ar, "failed to enable ARP-NS offload: %d\n", ret);
- goto exit;
+ goto disable_gtk_offload;
}
ret = ath10k_wow_cleanup(ar);
@@ -384,6 +440,8 @@ cleanup:
disable_ns_arp_offload:
ath10k_wow_enable_ns_arp_offload(ar, false);
+disable_gtk_offload:
+ ath10k_wow_config_gtk_offload(ar, false);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
@@ -427,8 +485,14 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
}
ret = ath10k_wow_enable_ns_arp_offload(ar, false);
- if (ret)
+ if (ret) {
ath10k_warn(ar, "failed to disable ARP-NS offload: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_config_gtk_offload(ar, false);
+ if (ret)
+ ath10k_warn(ar, "failed to disable GTK offload: %d\n", ret);
exit:
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
index 9745b9ddc7f5..ce79908cce19 100644
--- a/drivers/net/wireless/ath/ath10k/wow.h
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -29,7 +29,9 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath10k_wow_op_resume(struct ieee80211_hw *hw);
void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
-
+void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
#else
static inline int ath10k_wow_init(struct ath10k *ar)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 5fecae0ba52e..83e5aa6a9f28 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -4295,9 +4295,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0)
brcmf_err("setting AP mode failed %d\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
- if (err < 0)
- brcmf_err("setting INFRA mode failed %d\n", err);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
brcmf_fil_iovar_int_set(ifp, "mbss", 0);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 4d8ad7c8975f..bcea74ad6685 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -862,7 +862,7 @@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
- CNSS_EVENT_SYNC, NULL);
+ CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
}
EXPORT_SYMBOL(cnss_wlan_unregister_driver);
@@ -1508,8 +1508,14 @@ static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
cnss_recovery_reason_to_str(recovery_data->reason),
recovery_data->reason);
+ if (!plat_priv->driver_state) {
+ cnss_pr_err("Improper driver state, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
- cnss_pr_err("Recovery is already in progress!\n");
+ cnss_pr_err("Recovery is already in progress\n");
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 9bf4c7e11c66..b99754efcd6e 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -164,15 +164,16 @@ int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
pci_priv->pci_link_state = PCI_LINK_UP;
+ ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ if (ret)
+ goto out;
+
ret = pci_enable_device(pci_priv->pci_dev);
if (ret) {
cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
goto out;
}
- ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
- if (ret)
- goto out;
pci_set_master(pci_priv->pci_dev);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 0708eedd9671..1c69e8140d9d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -664,7 +664,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct sk_buff *skb = NULL;
-
+ bool rtstatus;
u32 totalpacketlen;
u8 u1rsvdpageloc[5] = { 0 };
bool b_dlok = false;
@@ -727,7 +727,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
memcpy((u8 *)skb_put(skb, totalpacketlen),
&reserved_page_packet, totalpacketlen);
- b_dlok = true;
+ rtstatus = rtl_cmd_send_packet(hw, skb);
+ if (rtstatus)
+ b_dlok = true;
if (b_dlok) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index bbb789f8990b..738d541a2255 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1377,6 +1377,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
ppsc->wakeup_reason = 0;
+ do_gettimeofday(&ts);
rtlhal->last_suspend_sec = ts.tv_sec;
switch (fw_reason) {
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index b604f61e1a05..04d6b2e6fec1 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -203,6 +203,7 @@ static DEFINE_SPINLOCK(reg_spinlock);
#define WCNSS_MAX_BUILD_VER_LEN 256
#define WCNSS_MAX_CMD_LEN (128)
#define WCNSS_MIN_CMD_LEN (3)
+#define WCNSS_CMD_INFO_LEN 2
/* control messages from userspace */
#define WCNSS_USR_CTRL_MSG_START 0x00000000
@@ -210,7 +211,6 @@ static DEFINE_SPINLOCK(reg_spinlock);
#define WCNSS_USR_WLAN_MAC_ADDR (WCNSS_USR_CTRL_MSG_START + 3)
#define MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x"
-#define SHOW_MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x\n"
#define WCNSS_USER_MAC_ADDR_LENGTH 18
/* message types */
@@ -473,11 +473,7 @@ static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
(char *)&macAddr[index], sizeof(char));
}
- pr_info("%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
- penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1],
- penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3],
- penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]);
-
+ pr_info("%s: Write MAC Addr: %pM\n", __func__, penv->wlan_nv_macAddr);
return count;
}
@@ -487,10 +483,7 @@ static ssize_t wcnss_wlan_macaddr_show(struct device *dev,
if (!penv)
return -ENODEV;
- return scnprintf(buf, PAGE_SIZE, SHOW_MAC_ADDRESS_STR,
- penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1],
- penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3],
- penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]);
+ return scnprintf(buf, PAGE_SIZE, "%pM\n", penv->wlan_nv_macAddr);
}
static DEVICE_ATTR(wcnss_mac_addr, S_IRUSR | S_IWUSR,
@@ -1653,10 +1646,8 @@ int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE])
return -ENODEV;
memcpy(mac_addr, penv->wlan_nv_macAddr, WLAN_MAC_ADDR_SIZE);
- pr_debug("%s: Get MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
- penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1],
- penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3],
- penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]);
+ pr_debug("%s: Get MAC Addr: %pM\n", __func__, penv->wlan_nv_macAddr);
+
return 0;
}
EXPORT_SYMBOL(wcnss_get_wlan_mac_address);
@@ -2625,57 +2616,57 @@ static int wcnss_ctrl_open(struct inode *inode, struct file *file)
return rc;
}
-
-void process_usr_ctrl_cmd(u8 *buf, size_t len)
+static ssize_t wcnss_ctrl_write(struct file *fp, const char __user
+ *user_buffer, size_t count, loff_t *position)
{
- u16 cmd = buf[0] << 8 | buf[1];
+ int rc = 0;
+ u16 cmd;
+ u8 buf[WCNSS_MAX_CMD_LEN];
- switch (cmd) {
+ if (!penv || !penv->ctrl_device_opened ||
+ WCNSS_MAX_CMD_LEN < count || WCNSS_MIN_CMD_LEN > count)
+ return -EFAULT;
+ mutex_lock(&penv->ctrl_lock);
+ rc = copy_from_user(buf, user_buffer, count);
+ if (rc) {
+ pr_err("%s: Failed to copy ctrl data\n", __func__);
+ goto exit;
+ }
+
+ cmd = buf[0] << 8 | buf[1];
+ switch (cmd) {
case WCNSS_USR_HAS_CAL_DATA:
- if (1 < buf[2])
- pr_err("%s: Invalid data for cal %d\n", __func__,
- buf[2]);
+ if (buf[2] > 1) {
+ pr_err("%s: Invalid cal data %d\n", __func__, buf[2]);
+ rc = -EINVAL;
+ goto exit;
+ }
has_calibrated_data = buf[2];
break;
case WCNSS_USR_WLAN_MAC_ADDR:
- memcpy(&penv->wlan_nv_macAddr, &buf[2],
- sizeof(penv->wlan_nv_macAddr));
+ if ((count - WCNSS_CMD_INFO_LEN) != WLAN_MAC_ADDR_SIZE) {
+ pr_err("%s: Invalid Mac addr %d\n", __func__, buf[2]);
+ rc = -EINVAL;
+ goto exit;
+ }
- pr_debug("%s: MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
- penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1],
- penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3],
- penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]);
+ memcpy(&penv->wlan_nv_macAddr, &buf[2],
+ sizeof(penv->wlan_nv_macAddr));
+ pr_debug("%s:MAC Addr: %pM\n", __func__, penv->wlan_nv_macAddr);
break;
-
default:
pr_err("%s: Invalid command %d\n", __func__, cmd);
+ rc = -EINVAL;
break;
}
-}
-
-static ssize_t wcnss_ctrl_write(struct file *fp, const char __user
- *user_buffer, size_t count, loff_t *position)
-{
- int rc = 0;
- u8 buf[WCNSS_MAX_CMD_LEN];
-
- if (!penv || !penv->ctrl_device_opened || WCNSS_MAX_CMD_LEN < count
- || WCNSS_MIN_CMD_LEN > count)
- return -EFAULT;
-
- mutex_lock(&penv->ctrl_lock);
- rc = copy_from_user(buf, user_buffer, count);
- if (0 == rc)
- process_usr_ctrl_cmd(buf, count);
+exit:
mutex_unlock(&penv->ctrl_lock);
-
return rc;
}
-
static const struct file_operations wcnss_ctrl_fops = {
.owner = THIS_MODULE,
.open = wcnss_ctrl_open,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 72ee1c305cc4..02db20b26749 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -67,6 +67,7 @@ module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
+#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
@@ -2157,11 +2158,12 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
- /* Allow as many queues as there are CPUs if user has not
+ /* Allow as many queues as there are CPUs but max. 8 if user has not
* specified a value.
*/
if (xenvif_max_queues == 0)
- xenvif_max_queues = num_online_cpus();
+ xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
+ num_online_cpus());
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 34a062ccb11d..fd221cc4cb79 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1840,27 +1840,19 @@ static int talk_to_netback(struct xenbus_device *dev,
xennet_destroy_queues(info);
err = xennet_create_queues(info, &num_queues);
- if (err < 0)
- goto destroy_ring;
+ if (err < 0) {
+ xenbus_dev_fatal(dev, err, "creating queues");
+ kfree(info->queues);
+ info->queues = NULL;
+ goto out;
+ }
/* Create shared ring, alloc event channel -- for each queue */
for (i = 0; i < num_queues; ++i) {
queue = &info->queues[i];
err = setup_netfront(dev, queue, feature_split_evtchn);
- if (err) {
- /* setup_netfront() will tidy up the current
- * queue on error, but we need to clean up
- * those already allocated.
- */
- if (i > 0) {
- rtnl_lock();
- netif_set_real_num_tx_queues(info->netdev, i);
- rtnl_unlock();
- goto destroy_ring;
- } else {
- goto out;
- }
- }
+ if (err)
+ goto destroy_ring;
}
again:
@@ -1950,9 +1942,9 @@ abort_transaction_no_dev_fatal:
xenbus_transaction_end(xbt, 1);
destroy_ring:
xennet_disconnect_backend(info);
- kfree(info->queues);
- info->queues = NULL;
+ xennet_destroy_queues(info);
out:
+ device_unregister(&dev->dev);
return err;
}
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 96526dcfdd37..ff7b9632ad61 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -823,7 +823,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
nsindex = to_namespace_index(ndd, 0);
memset(nsindex, 0, ndd->nsarea.config_size);
for (i = 0; i < 2; i++) {
- int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
+ int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
if (rc)
return rc;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index aae7379af4e4..c2184104b789 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1305,7 +1305,7 @@ static umode_t namespace_visible(struct kobject *kobj,
if (a == &dev_attr_resource.attr) {
if (is_namespace_blk(dev))
return 0;
- return a->mode;
+ return 0400;
}
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 669edbd47602..d6ceb8b91cd6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -350,8 +350,8 @@ static void async_completion(struct nvme_queue *nvmeq, void *ctx,
struct async_cmd_info *cmdinfo = ctx;
cmdinfo->result = le32_to_cpup(&cqe->result);
cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
- queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
blk_mq_free_request(cmdinfo->req);
+ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
}
static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 53b79c5f0559..379d08f76146 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -131,6 +131,12 @@ struct mvebu_pcie {
int nports;
};
+struct mvebu_pcie_window {
+ phys_addr_t base;
+ phys_addr_t remap;
+ size_t size;
+};
+
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
char *name;
@@ -148,10 +154,8 @@ struct mvebu_pcie_port {
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
- phys_addr_t memwin_base;
- size_t memwin_size;
- phys_addr_t iowin_base;
- size_t iowin_size;
+ struct mvebu_pcie_window memwin;
+ struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
};
@@ -377,23 +381,45 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
}
}
+static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ const struct mvebu_pcie_window *desired,
+ struct mvebu_pcie_window *cur)
+{
+ if (desired->base == cur->base && desired->remap == cur->remap &&
+ desired->size == cur->size)
+ return;
+
+ if (cur->size != 0) {
+ mvebu_pcie_del_windows(port, cur->base, cur->size);
+ cur->size = 0;
+ cur->base = 0;
+
+ /*
+ * If something tries to change the window while it is enabled
+ * the change will not be done atomically. That would be
+ * difficult to do in the general case.
+ */
+ }
+
+ if (desired->size == 0)
+ return;
+
+ mvebu_pcie_add_windows(port, target, attribute, desired->base,
+ desired->size, desired->remap);
+ *cur = *desired;
+}
+
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
- phys_addr_t iobase;
+ struct mvebu_pcie_window desired = {};
/* Are the new iobase/iolimit values invalid? */
if (port->bridge.iolimit < port->bridge.iobase ||
port->bridge.iolimitupper < port->bridge.iobaseupper ||
!(port->bridge.command & PCI_COMMAND_IO)) {
-
- /* If a window was configured, remove it */
- if (port->iowin_base) {
- mvebu_pcie_del_windows(port, port->iowin_base,
- port->iowin_size);
- port->iowin_base = 0;
- port->iowin_size = 0;
- }
-
+ mvebu_pcie_set_window(port, port->io_target, port->io_attr,
+ &desired, &port->iowin);
return;
}
@@ -410,32 +436,27 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* specifications. iobase is the bus address, port->iowin_base
* is the CPU address.
*/
- iobase = ((port->bridge.iobase & 0xF0) << 8) |
- (port->bridge.iobaseupper << 16);
- port->iowin_base = port->pcie->io.start + iobase;
- port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
- (port->bridge.iolimitupper << 16)) -
- iobase) + 1;
-
- mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
- port->iowin_base, port->iowin_size,
- iobase);
+ desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
+ (port->bridge.iobaseupper << 16);
+ desired.base = port->pcie->io.start + desired.remap;
+ desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+ (port->bridge.iolimitupper << 16)) -
+ desired.remap) +
+ 1;
+
+ mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
+ &port->iowin);
}
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
+ struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+
/* Are the new membase/memlimit values invalid? */
if (port->bridge.memlimit < port->bridge.membase ||
!(port->bridge.command & PCI_COMMAND_MEMORY)) {
-
- /* If a window was configured, remove it */
- if (port->memwin_base) {
- mvebu_pcie_del_windows(port, port->memwin_base,
- port->memwin_size);
- port->memwin_base = 0;
- port->memwin_size = 0;
- }
-
+ mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
+ &desired, &port->memwin);
return;
}
@@ -445,14 +466,12 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
- port->memwin_size =
- (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
- port->memwin_base + 1;
-
- mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
- port->memwin_base, port->memwin_size,
- MVEBU_MBUS_NO_REMAP);
+ desired.base = ((port->bridge.membase & 0xFFF0) << 16);
+ desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base + 1;
+
+ mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
+ &port->memwin);
}
/*
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b83df942794f..193ac13de49b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1414,8 +1414,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
- if (hpp)
- dev_warn(&dev->dev, "PCI-X settings not supported\n");
+ int pos;
+
+ if (!hpp)
+ return;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!pos)
+ return;
+
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
}
static bool pcie_root_rcb_set(struct pci_dev *dev)
@@ -1441,6 +1449,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
if (!hpp)
return;
+ if (!pci_is_pcie(dev))
+ return;
+
if (hpp->revision > 1) {
dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
hpp->revision);
diff --git a/drivers/platform/msm/gpio-usbdetect.c b/drivers/platform/msm/gpio-usbdetect.c
index 1628253fb545..97682436f92c 100644
--- a/drivers/platform/msm/gpio-usbdetect.c
+++ b/drivers/platform/msm/gpio-usbdetect.c
@@ -77,8 +77,16 @@ static irqreturn_t gpio_usbdetect_id_irq(int irq, void *data)
static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data)
{
struct gpio_usbdetect *usb = data;
+ bool curr_id_state;
+ static int prev_id_state = -EINVAL;
- if (usb->id_state) {
+ curr_id_state = usb->id_state;
+ if (curr_id_state == prev_id_state) {
+ dev_dbg(&usb->pdev->dev, "no change in ID state\n");
+ return IRQ_HANDLED;
+ }
+
+ if (curr_id_state) {
dev_dbg(&usb->pdev->dev, "stopping usb host\n");
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 0);
enable_irq(usb->vbus_det_irq);
@@ -88,6 +96,8 @@ static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data)
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1);
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 1);
}
+
+ prev_id_state = curr_id_state;
return IRQ_HANDLED;
}
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index eaf50ca1cea5..871e0c6f88c1 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -261,123 +261,6 @@ static ssize_t gsi_dump_ch(struct file *file,
return count;
}
-static ssize_t gsi_dump_ee(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- uint32_t val;
-
- val = gsi_readl(gsi_ctx->base +
- GSI_GSI_MANAGER_EE_QOS_n_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d QOS 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val);
- if (gsi_ctx->per.ver == GSI_VER_1_0) {
- val = gsi_readl(gsi_ctx->base +
- GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
- } else if (gsi_ctx->per.ver == GSI_VER_1_2) {
- val = gsi_readl(gsi_ctx->base +
- GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
- } else if (gsi_ctx->per.ver == GSI_VER_1_3) {
- val = gsi_readl(gsi_ctx->base +
- GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
- } else {
- WARN_ON(1);
- }
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_MCS_CODE_VER_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d MCS_CODE_VER 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d TYPE_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d CH_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d EV_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d IEOB_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d GLOB_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d GSI_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d INTSET 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_MSI_BASE_LSB_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d MSI_BASE_LSB 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_MSI_BASE_MSB_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d MSI_BASE_MSB 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_INT_VEC_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d INT_VEC 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d SCR0 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_CNTXT_SCRATCH_1_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d SCR1 0x%x\n", gsi_ctx->per.ee, val);
-
- return count;
-}
-
-static ssize_t gsi_dump_map(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- struct gsi_chan_ctx *ctx;
- uint32_t val1;
- uint32_t val2;
- int i;
-
- TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap);
- for (i = 0; i < gsi_ctx->max_ch; i++) {
- ctx = &gsi_ctx->chan[i];
-
- if (ctx->allocated) {
- TERR("VIRT CH%2d -> VIRT EV%2d\n", ctx->props.ch_id,
- ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
- val1 = gsi_readl(gsi_ctx->base +
- GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(i,
- gsi_ctx->per.ee));
- TERR("VIRT CH%2d -> PHYS CH%2d\n", ctx->props.ch_id,
- val1 &
- GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK);
- if (ctx->evtr) {
- val2 = gsi_readl(gsi_ctx->base +
- GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(
- ctx->evtr->id, gsi_ctx->per.ee));
- TERR("VRT EV%2d -> PHYS EV%2d\n", ctx->evtr->id,
- val2 &
- GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK);
- }
- TERR("\n");
- }
- }
-
- return count;
-}
-
static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx)
{
if (!ctx->allocated)
@@ -793,14 +676,6 @@ const struct file_operations gsi_ch_dump_ops = {
.write = gsi_dump_ch,
};
-const struct file_operations gsi_ee_dump_ops = {
- .write = gsi_dump_ee,
-};
-
-const struct file_operations gsi_map_ops = {
- .write = gsi_dump_map,
-};
-
const struct file_operations gsi_stats_ops = {
.write = gsi_dump_stats,
};
@@ -828,7 +703,6 @@ const struct file_operations gsi_ipc_low_ops = {
void gsi_debugfs_init(void)
{
static struct dentry *dfile;
- const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
const mode_t write_only_mode = S_IWUSR | S_IWGRP;
dent = debugfs_create_dir("gsi", 0);
@@ -851,20 +725,6 @@ void gsi_debugfs_init(void)
goto fail;
}
- dfile = debugfs_create_file("ee_dump", read_only_mode, dent,
- 0, &gsi_ee_dump_ops);
- if (!dfile || IS_ERR(dfile)) {
- TERR("fail to create ee_dump file\n");
- goto fail;
- }
-
- dfile = debugfs_create_file("map", read_only_mode, dent,
- 0, &gsi_map_ops);
- if (!dfile || IS_ERR(dfile)) {
- TERR("fail to create map file\n");
- goto fail;
- }
-
dfile = debugfs_create_file("stats", write_only_mode, dent,
0, &gsi_stats_ops);
if (!dfile || IS_ERR(dfile)) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index c0af295c7362..834f028d3e48 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1039,6 +1039,11 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
goto error;
}
}
+ } else {
+ if (rule->rt_tbl_idx > 0) {
+ IPAERR_RL("invalid RT tbl\n");
+ goto error;
+ }
}
entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
@@ -1160,6 +1165,11 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
goto error;
}
}
+ } else {
+ if (frule->rule.rt_tbl_idx > 0) {
+ IPAERR_RL("invalid RT tbl\n");
+ goto error;
+ }
}
entry->rule = frule->rule;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 49aa7f25347d..fbbb3f20b571 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -910,8 +910,17 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user)
return -EINVAL;
}
- if (by_user)
+ if (by_user) {
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ IPADBG("Trying to delete hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ IPAERR("User cannot delete default header\n");
+ return -EPERM;
+ }
+ }
entry->user_deleted = true;
+ }
if (--entry->ref_cnt) {
IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
@@ -1234,13 +1243,18 @@ int ipa2_reset_hdr(void)
/* do not remove the default header */
if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
- if (entry->is_hdr_proc_ctx) {
- mutex_unlock(&ipa_ctx->lock);
- WARN_ON(1);
- IPAERR("default header is proc ctx\n");
- return -EFAULT;
+ IPADBG("Trying to remove hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ if (entry->is_hdr_proc_ctx) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ IPAERR("default header is proc ctx\n");
+ return -EFAULT;
+ }
+ IPADBG("skip default header\n");
+ continue;
}
- continue;
}
if (ipa_id_find(entry->id) == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index b158b2b1c326..80e51ad61417 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -2647,7 +2647,7 @@ static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
if (index == MAX_NUM_OF_MUX_CHANNEL) {
IPAWANERR("%s is an invalid iface name\n",
data->interface_name);
- return -EFAULT;
+ return -ENODEV;
}
mux_id = mux_channel[index].mux_id;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
index 02bdd0334e7f..f2aecdaeff54 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
@@ -61,7 +61,7 @@ static dev_t device;
static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- int retval = 0;
+ int retval = 0, rc = 0;
u32 pyld_sz;
u8 *param = NULL;
@@ -184,10 +184,14 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (rmnet_ipa_set_data_quota(
- (struct wan_ioctl_set_data_quota *)param)) {
+ rc = rmnet_ipa_set_data_quota(
+ (struct wan_ioctl_set_data_quota *)param);
+ if (rc != 0) {
IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
- retval = -EFAULT;
+ if (rc == -ENODEV)
+ retval = -ENODEV;
+ else
+ retval = -EFAULT;
break;
}
if (copy_to_user((u8 *)arg, param, pyld_sz)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 71da7d28a451..cd39a46037f1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -505,6 +505,12 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
pr_err("ether_type:%x ", attrib->ether_type);
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN)
+ pr_err("tcp syn ");
+
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP)
+ pr_err("tcp syn l2tp ");
+
pr_err("\n");
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index a03d8978c6c2..ced8c8b2d3ab 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -757,10 +757,16 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
goto error;
}
}
+ } else {
+ if (rule->rt_tbl_idx > 0) {
+ IPAERR("invalid RT tbl\n");
+ goto error;
+ }
}
if (rule->rule_id) {
- if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
+ if ((rule->rule_id < ipahal_get_rule_id_hi_bit()) ||
+ (rule->rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
IPAERR("invalid rule_id provided 0x%x\n"
"rule_id with bit 0x%x are auto generated\n",
rule->rule_id, ipahal_get_rule_id_hi_bit());
@@ -866,7 +872,8 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
ipa_insert_failed:
list_del(&entry->link);
/* if rule id was allocated from idr, remove it */
- if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
+ (entry->rule_id >= ipahal_get_low_rule_id()))
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
@@ -913,7 +920,8 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
ipa_insert_failed:
list_del(&entry->link);
/* if rule id was allocated from idr, remove it */
- if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
+ (entry->rule_id >= ipahal_get_low_rule_id()))
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
@@ -947,7 +955,8 @@ static int __ipa_del_flt_rule(u32 rule_hdl)
entry->tbl->rule_cnt, entry->rule_id);
entry->cookie = 0;
/* if rule id was allocated from idr, remove it */
- if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
+ (entry->rule_id >= ipahal_get_low_rule_id()))
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
@@ -1003,6 +1012,11 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
goto error;
}
}
+ } else {
+ if (frule->rule.rt_tbl_idx > 0) {
+ IPAERR_RL("invalid RT tbl\n");
+ goto error;
+ }
}
entry->rule = frule->rule;
@@ -1367,7 +1381,8 @@ int ipa3_reset_flt(enum ipa_ip_type ip)
if (entry->rt_tbl)
entry->rt_tbl->ref_cnt--;
/* if rule id was allocated from idr, remove it */
- if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) &&
+ (entry->rule_id >= ipahal_get_low_rule_id()))
idr_remove(&entry->tbl->rule_ids,
entry->rule_id);
entry->cookie = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index ce35ba02154d..b5b8643f24a9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -678,8 +678,17 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
return -EINVAL;
}
- if (by_user)
+ if (by_user) {
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ IPADBG("Trying to delete hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ IPAERR("User cannot delete default header\n");
+ return -EPERM;
+ }
+ }
entry->user_deleted = true;
+ }
if (--entry->ref_cnt) {
IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
@@ -978,13 +987,18 @@ int ipa3_reset_hdr(void)
/* do not remove the default header */
if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
- if (entry->is_hdr_proc_ctx) {
- IPAERR("default header is proc ctx\n");
- mutex_unlock(&ipa3_ctx->lock);
- WARN_ON(1);
- return -EFAULT;
+ IPADBG("Trying to remove hdr %s offset=%u\n",
+ entry->name, entry->offset_entry->offset);
+ if (!entry->offset_entry->offset) {
+ if (entry->is_hdr_proc_ctx) {
+ IPAERR("default header is proc ctx\n");
+ mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ IPADBG("skip default header\n");
+ continue;
}
- continue;
}
if (ipa3_id_find(entry->id) == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index b7ed529e9160..b5916cd1fbf6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -664,6 +664,21 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
ihl_ofst_meq32++;
}
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 12 => offset of SYN after v4 header */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_32(0x20000, rest);
+ rest = ipa_write_32(0x20000, rest);
+ ihl_ofst_meq32++;
+ }
+
if (attrib->attrib_mask & IPA_FLT_META_DATA) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -970,6 +985,57 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
ihl_ofst_meq32++;
}
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 12 => offset of SYN after v4 header */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_32(0x20000, rest);
+ rest = ipa_write_32(0x20000, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+
+ /* populate TCP protocol eq */
+ if (attrib->ether_type == 0x0800) {
+ extra = ipa_write_8(30, extra);
+ rest = ipa_write_32(0xFF0000, rest);
+ rest = ipa_write_32(0x60000, rest);
+ } else {
+ extra = ipa_write_8(26, extra);
+ rest = ipa_write_32(0xFF00, rest);
+ rest = ipa_write_32(0x600, rest);
+ }
+
+ /* populate TCP SYN eq */
+ if (attrib->ether_type == 0x0800) {
+ extra = ipa_write_8(54, extra);
+ rest = ipa_write_32(0x20000, rest);
+ rest = ipa_write_32(0x20000, rest);
+ } else {
+ extra = ipa_write_8(74, extra);
+ rest = ipa_write_32(0x20000, rest);
+ rest = ipa_write_32(0x20000, rest);
+ }
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_META_DATA) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -1044,6 +1110,27 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
ihl_ofst_rng16++;
}
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 20 => offset of Ethertype after v4 header */
+ if (attrib->ether_type == 0x0800) {
+ extra = ipa_write_8(21, extra);
+ rest = ipa_write_16(0x0045, rest);
+ rest = ipa_write_16(0x0045, rest);
+ } else {
+ extra = ipa_write_8(20, extra);
+ rest = ipa_write_16(attrib->ether_type, rest);
+ rest = ipa_write_16(attrib->ether_type, rest);
+ }
+ ihl_ofst_rng16++;
+ }
+
if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF,
@@ -1480,6 +1567,21 @@ static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
ofst_meq128++;
}
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 12 => offset of SYN after v4 header */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 12;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0x20000;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = 0x20000;
+ ihl_ofst_meq32++;
+ }
+
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
IPAHAL_ERR("ran out of meq32 eq\n");
@@ -1843,6 +1945,65 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
ofst_meq128++;
}
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 12 => offset of SYN after v4 header */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 12;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0x20000;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = 0x20000;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+
+ /* populate TCP protocol eq */
+ if (attrib->ether_type == 0x0800) {
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 30;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0xFF0000;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ 0x60000;
+ } else {
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 26;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0xFF00;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ 0x600;
+ }
+
+ /* populate TCP SYN eq */
+ if (attrib->ether_type == 0x0800) {
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 54;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0x20000;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ 0x20000;
+ } else {
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 74;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0x20000;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ 0x20000;
+ }
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
IPAHAL_ERR("ran out of meq32 eq\n");
@@ -1985,6 +2146,32 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
ihl_ofst_rng16++;
}
+ if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ if (attrib->ether_type == 0x0800) {
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
+ = 21;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = 0x0045;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = 0x0045;
+ } else {
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset =
+ 20;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->ether_type;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->ether_type;
+ }
+ ihl_ofst_rng16++;
+ }
+
if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
eq_atrb->fl_eq_present = 1;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index e49402afb6a2..9c28a6f4b3db 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2791,7 +2791,7 @@ static int rmnet_ipa3_set_data_quota_modem(
if (index == MAX_NUM_OF_MUX_CHANNEL) {
IPAWANERR("%s is an invalid iface name\n",
data->interface_name);
- return -EFAULT;
+ return -ENODEV;
}
mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index dc1e5ce511a6..522fe2d49e67 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -71,7 +71,7 @@ static long ipa3_wan_ioctl(struct file *filp,
unsigned int cmd,
unsigned long arg)
{
- int retval = 0;
+ int retval = 0, rc = 0;
u32 pyld_sz;
u8 *param = NULL;
@@ -247,10 +247,14 @@ static long ipa3_wan_ioctl(struct file *filp,
retval = -EFAULT;
break;
}
- if (rmnet_ipa3_set_data_quota(
- (struct wan_ioctl_set_data_quota *)param)) {
+ rc = rmnet_ipa3_set_data_quota(
+ (struct wan_ioctl_set_data_quota *)param);
+ if (rc != 0) {
IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
- retval = -EFAULT;
+ if (rc == -ENODEV)
+ retval = -ENODEV;
+ else
+ retval = -EFAULT;
break;
}
if (copy_to_user((u8 *)arg, param, pyld_sz)) {
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index af2046c87806..847f75601591 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -249,7 +249,7 @@ static int hp_wmi_display_state(void)
int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -259,7 +259,7 @@ static int hp_wmi_hddtemp_state(void)
int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -269,7 +269,7 @@ static int hp_wmi_als_state(void)
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -280,7 +280,7 @@ static int hp_wmi_dock_state(void)
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state & 0x1;
}
@@ -291,7 +291,7 @@ static int hp_wmi_tablet_state(void)
int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return ret;
+ return ret < 0 ? ret : -EINVAL;
return (state & 0x4) ? 1 : 0;
}
@@ -324,7 +324,7 @@ static int __init hp_wmi_enable_hotkeys(void)
int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
sizeof(value), 0);
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return 0;
}
@@ -337,7 +337,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
&query, sizeof(query), 0);
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return 0;
}
@@ -429,7 +429,7 @@ static int hp_wmi_post_code_state(void)
int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -495,7 +495,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
@@ -516,7 +516,7 @@ static ssize_t set_postcode(struct device *dev, struct device_attribute *attr,
ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
@@ -573,10 +573,12 @@ static void hp_wmi_notify(u32 value, void *context)
switch (event_id) {
case HPWMI_DOCK_EVENT:
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
break;
case HPWMI_PARK_HDD:
@@ -649,6 +651,7 @@ static int __init hp_wmi_input_setup(void)
{
acpi_status status;
int err;
+ int val;
hp_wmi_input_dev = input_allocate_device();
if (!hp_wmi_input_dev)
@@ -659,17 +662,26 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_input_dev->id.bustype = BUS_HOST;
__set_bit(EV_SW, hp_wmi_input_dev->evbit);
- __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ /* Dock */
+ val = hp_wmi_dock_state();
+ if (!(val < 0)) {
+ __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
+ }
+
+ /* Tablet mode */
+ val = hp_wmi_tablet_state();
+ if (!(val < 0)) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
if (err)
goto err_free_dev;
/* Set initial hardware state */
- input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
@@ -982,10 +994,12 @@ static int hp_wmi_resume_handler(struct device *device)
* changed.
*/
if (hp_wmi_input_dev) {
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
}
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 9f713b832ba3..5c768c4627d3 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -550,6 +550,7 @@ static const struct platform_device_id therm_id_table[] = {
{ "msic_thermal", 1 },
{ }
};
+MODULE_DEVICE_TABLE(platform, therm_id_table);
static struct platform_driver mid_thermal_driver = {
.driver = {
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 209263ccced7..d052e9518060 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -328,11 +328,28 @@ static void msm_restart_prepare(const char *cmd)
__raw_writel(0x7766550a, restart_reason);
} else if (!strncmp(cmd, "oem-", 4)) {
unsigned long code;
+ unsigned long reset_reason;
int ret;
ret = kstrtoul(cmd + 4, 16, &code);
- if (!ret)
+ if (!ret) {
+ /* Bit-2 to bit-7 of SOFT_RB_SPARE for hard
+ * reset reason:
+ * Value 0 to 31 for common defined features
+ * Value 32 to 63 for oem specific features
+ */
+ reset_reason = code +
+ PON_RESTART_REASON_OEM_MIN;
+ if (reset_reason > PON_RESTART_REASON_OEM_MAX ||
+ reset_reason < PON_RESTART_REASON_OEM_MIN) {
+ pr_err("Invalid oem reset reason: %lx\n",
+ reset_reason);
+ } else {
+ qpnp_pon_set_restart_reason(
+ reset_reason);
+ }
__raw_writel(0x6f656d00 | (code & 0xff),
restart_reason);
+ }
} else if (!strncmp(cmd, "edl", 3)) {
enable_emergency_dload_mode();
} else {
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 1935704fcf09..20191ffa5e68 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -13,6 +13,7 @@
#ifndef __FG_CORE_H__
#define __FG_CORE_H__
+#include <linux/alarmtimer.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
@@ -224,6 +225,12 @@ enum slope_limit_status {
SLOPE_LIMIT_NUM_COEFFS,
};
+enum esr_filter_status {
+ ROOM_TEMP = 1,
+ LOW_TEMP,
+ RELAX_TEMP,
+};
+
enum esr_timer_config {
TIMER_RETRY = 0,
TIMER_MAX,
@@ -270,6 +277,9 @@ struct fg_dt_props {
int esr_broad_flt_upct;
int esr_tight_lt_flt_upct;
int esr_broad_lt_flt_upct;
+ int esr_flt_rt_switch_temp;
+ int esr_tight_rt_flt_upct;
+ int esr_broad_rt_flt_upct;
int slope_limit_temp;
int esr_pulse_thresh_ma;
int esr_meas_curr_ma;
@@ -424,8 +434,10 @@ struct fg_chip {
int delta_soc;
int last_msoc;
int last_recharge_volt_mv;
+ int delta_temp_irq_count;
int esr_timer_charging_default[NUM_ESR_TIMERS];
enum slope_limit_status slope_limit_sts;
+ enum esr_filter_status esr_flt_sts;
bool profile_available;
bool profile_loaded;
bool battery_missing;
@@ -446,6 +458,9 @@ struct fg_chip {
struct work_struct status_change_work;
struct delayed_work ttf_work;
struct delayed_work sram_dump_work;
+ struct work_struct esr_filter_work;
+ struct alarm esr_filter_alarm;
+ ktime_t last_delta_temp_time;
};
/* Debugfs data structures are below */
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index b1a57d8853e8..2f958a3438ee 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -2182,67 +2182,197 @@ static int fg_slope_limit_config(struct fg_chip *chip, int batt_temp)
return 0;
}
-static int fg_esr_filter_config(struct fg_chip *chip, int batt_temp)
+static int __fg_esr_filter_config(struct fg_chip *chip,
+ enum esr_filter_status esr_flt_sts)
{
- u8 esr_tight_lt_flt, esr_broad_lt_flt;
- bool cold_temp = false;
+ u8 esr_tight_flt, esr_broad_flt;
+ int esr_tight_flt_upct, esr_broad_flt_upct;
int rc;
- /*
- * If the battery temperature is lower than -20 C, then skip modifying
- * ESR filter.
- */
- if (batt_temp < -210)
+ if (esr_flt_sts == chip->esr_flt_sts)
return 0;
- /*
- * If battery temperature is lesser than 10 C (default), then apply the
- * ESR low temperature tight and broad filter values to ESR room
- * temperature tight and broad filters. If battery temperature is higher
- * than 10 C, then apply back the room temperature ESR filter
- * coefficients to ESR room temperature tight and broad filters.
- */
- if (batt_temp > chip->dt.esr_flt_switch_temp
- && chip->esr_flt_cold_temp_en) {
- fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
- chip->dt.esr_tight_flt_upct, &esr_tight_lt_flt);
- fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
- chip->dt.esr_broad_flt_upct, &esr_broad_lt_flt);
- } else if (batt_temp <= chip->dt.esr_flt_switch_temp
- && !chip->esr_flt_cold_temp_en) {
- fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
- chip->dt.esr_tight_lt_flt_upct, &esr_tight_lt_flt);
- fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
- chip->dt.esr_broad_lt_flt_upct, &esr_broad_lt_flt);
- cold_temp = true;
+ if (esr_flt_sts == ROOM_TEMP) {
+ esr_tight_flt_upct = chip->dt.esr_tight_flt_upct;
+ esr_broad_flt_upct = chip->dt.esr_broad_flt_upct;
+ } else if (esr_flt_sts == LOW_TEMP) {
+ esr_tight_flt_upct = chip->dt.esr_tight_lt_flt_upct;
+ esr_broad_flt_upct = chip->dt.esr_broad_lt_flt_upct;
+ } else if (esr_flt_sts == RELAX_TEMP) {
+ esr_tight_flt_upct = chip->dt.esr_tight_rt_flt_upct;
+ esr_broad_flt_upct = chip->dt.esr_broad_rt_flt_upct;
} else {
+ pr_err("Unknown esr filter config\n");
return 0;
}
+ fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER, esr_tight_flt_upct,
+ &esr_tight_flt);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_byte,
- &esr_tight_lt_flt,
+ &esr_tight_flt,
chip->sp[FG_SRAM_ESR_TIGHT_FILTER].len, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in writing ESR LT tight filter, rc=%d\n", rc);
return rc;
}
+ fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER, esr_broad_flt_upct,
+ &esr_broad_flt);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_word,
chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_byte,
- &esr_broad_lt_flt,
+ &esr_broad_flt,
chip->sp[FG_SRAM_ESR_BROAD_FILTER].len, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in writing ESR LT broad filter, rc=%d\n", rc);
return rc;
}
- chip->esr_flt_cold_temp_en = cold_temp;
- fg_dbg(chip, FG_STATUS, "applied %s ESR filter values\n",
- cold_temp ? "cold" : "normal");
+ chip->esr_flt_sts = esr_flt_sts;
+ fg_dbg(chip, FG_STATUS, "applied ESR filter %d values\n", esr_flt_sts);
+ return 0;
+}
+
+#define DT_IRQ_COUNT 3
+#define DELTA_TEMP_IRQ_TIME_MS 300000
+#define ESR_FILTER_ALARM_TIME_MS 900000
+static int fg_esr_filter_config(struct fg_chip *chip, int batt_temp,
+ bool override)
+{
+ enum esr_filter_status esr_flt_sts = ROOM_TEMP;
+ bool qnovo_en, input_present, count_temp_irq = false;
+ s64 time_ms;
+ int rc;
+
+ /*
+ * If the battery temperature is lower than -20 C, then skip modifying
+ * ESR filter.
+ */
+ if (batt_temp < -210)
+ return 0;
+
+ qnovo_en = is_qnovo_en(chip);
+ input_present = is_input_present(chip);
+
+ /*
+ * If Qnovo is enabled, after hitting a lower battery temperature of
+ * say 6 C, count the delta battery temperature interrupts for a
+ * certain period of time when the battery temperature increases.
+ * Switch to relaxed filter coefficients once the temperature increase
+ * is qualified so that ESR accuracy can be improved.
+ */
+ if (qnovo_en && !override) {
+ if (input_present) {
+ if (chip->esr_flt_sts == RELAX_TEMP) {
+ /* do nothing */
+ return 0;
+ }
+
+ count_temp_irq = true;
+ if (chip->delta_temp_irq_count) {
+ /* Don't count when temperature is dropping. */
+ if (batt_temp <= chip->last_batt_temp)
+ count_temp_irq = false;
+ } else {
+ /*
+ * Starting point for counting. Check if the
+ * temperature is qualified.
+ */
+ if (batt_temp > chip->dt.esr_flt_rt_switch_temp)
+ count_temp_irq = false;
+ else
+ chip->last_delta_temp_time =
+ ktime_get();
+ }
+ } else {
+ chip->delta_temp_irq_count = 0;
+ rc = alarm_try_to_cancel(&chip->esr_filter_alarm);
+ if (rc < 0)
+ pr_err("Couldn't cancel esr_filter_alarm\n");
+ }
+ }
+
+ /*
+ * If battery temperature is lesser than 10 C (default), then apply the
+ * ESR low temperature tight and broad filter values to ESR room
+ * temperature tight and broad filters. If battery temperature is higher
+ * than 10 C, then apply back the room temperature ESR filter
+ * coefficients to ESR room temperature tight and broad filters.
+ */
+ if (batt_temp > chip->dt.esr_flt_switch_temp)
+ esr_flt_sts = ROOM_TEMP;
+ else
+ esr_flt_sts = LOW_TEMP;
+
+ if (count_temp_irq) {
+ time_ms = ktime_ms_delta(ktime_get(),
+ chip->last_delta_temp_time);
+ chip->delta_temp_irq_count++;
+ fg_dbg(chip, FG_STATUS, "dt_irq_count: %d\n",
+ chip->delta_temp_irq_count);
+
+ if (chip->delta_temp_irq_count >= DT_IRQ_COUNT
+ && time_ms <= DELTA_TEMP_IRQ_TIME_MS) {
+ fg_dbg(chip, FG_STATUS, "%d interrupts in %lld ms\n",
+ chip->delta_temp_irq_count, time_ms);
+ esr_flt_sts = RELAX_TEMP;
+ }
+ }
+
+ rc = __fg_esr_filter_config(chip, esr_flt_sts);
+ if (rc < 0)
+ return rc;
+
+ if (esr_flt_sts == RELAX_TEMP)
+ alarm_start_relative(&chip->esr_filter_alarm,
+ ms_to_ktime(ESR_FILTER_ALARM_TIME_MS));
+
return 0;
}
+#define FG_ESR_FILTER_RESTART_MS 60000
+static void esr_filter_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip, esr_filter_work);
+ int rc, batt_temp;
+
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ alarm_start_relative(&chip->esr_filter_alarm,
+ ms_to_ktime(FG_ESR_FILTER_RESTART_MS));
+ }
+
+ rc = fg_esr_filter_config(chip, batt_temp, true);
+ if (rc < 0) {
+ pr_err("Error in configuring ESR filter rc:%d\n", rc);
+ alarm_start_relative(&chip->esr_filter_alarm,
+ ms_to_ktime(FG_ESR_FILTER_RESTART_MS));
+ }
+
+ chip->delta_temp_irq_count = 0;
+ pm_relax(chip->dev);
+}
+
+static enum alarmtimer_restart fg_esr_filter_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct fg_chip *chip = container_of(alarm, struct fg_chip,
+ esr_filter_alarm);
+
+ fg_dbg(chip, FG_STATUS, "ESR filter alarm triggered %lld\n",
+ ktime_to_ms(now));
+ /*
+ * We cannot vote for awake votable here as that takes a mutex lock
+ * and this is executed in an atomic context.
+ */
+ pm_stay_awake(chip->dev);
+ schedule_work(&chip->esr_filter_work);
+
+ return ALARMTIMER_NORESTART;
+}
+
static int fg_esr_fcc_config(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
@@ -4293,14 +4423,14 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
union power_supply_propval prop = {0, };
int rc, batt_temp;
- fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
rc = fg_get_battery_temp(chip, &batt_temp);
if (rc < 0) {
pr_err("Error in getting batt_temp\n");
return IRQ_HANDLED;
}
+ fg_dbg(chip, FG_IRQ, "irq %d triggered bat_temp: %d\n", irq, batt_temp);
- rc = fg_esr_filter_config(chip, batt_temp);
+ rc = fg_esr_filter_config(chip, batt_temp, false);
if (rc < 0)
pr_err("Error in configuring ESR filter rc:%d\n", rc);
@@ -4689,8 +4819,11 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip)
#define DEFAULT_ESR_FLT_TEMP_DECIDEGC 100
#define DEFAULT_ESR_TIGHT_FLT_UPCT 3907
#define DEFAULT_ESR_BROAD_FLT_UPCT 99610
-#define DEFAULT_ESR_TIGHT_LT_FLT_UPCT 48829
-#define DEFAULT_ESR_BROAD_LT_FLT_UPCT 148438
+#define DEFAULT_ESR_TIGHT_LT_FLT_UPCT 30000
+#define DEFAULT_ESR_BROAD_LT_FLT_UPCT 30000
+#define DEFAULT_ESR_FLT_RT_DECIDEGC 60
+#define DEFAULT_ESR_TIGHT_RT_FLT_UPCT 5860
+#define DEFAULT_ESR_BROAD_RT_FLT_UPCT 156250
#define DEFAULT_ESR_CLAMP_MOHMS 20
#define DEFAULT_ESR_PULSE_THRESH_MA 110
#define DEFAULT_ESR_MEAS_CURR_MA 120
@@ -5016,6 +5149,27 @@ static int fg_parse_dt(struct fg_chip *chip)
else
chip->dt.esr_broad_lt_flt_upct = temp;
+ rc = of_property_read_u32(node, "qcom,fg-esr-rt-filter-switch-temp",
+ &temp);
+ if (rc < 0)
+ chip->dt.esr_flt_rt_switch_temp = DEFAULT_ESR_FLT_RT_DECIDEGC;
+ else
+ chip->dt.esr_flt_rt_switch_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-esr-tight-rt-filter-micro-pct",
+ &temp);
+ if (rc < 0)
+ chip->dt.esr_tight_rt_flt_upct = DEFAULT_ESR_TIGHT_RT_FLT_UPCT;
+ else
+ chip->dt.esr_tight_rt_flt_upct = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-esr-broad-rt-filter-micro-pct",
+ &temp);
+ if (rc < 0)
+ chip->dt.esr_broad_rt_flt_upct = DEFAULT_ESR_BROAD_RT_FLT_UPCT;
+ else
+ chip->dt.esr_broad_rt_flt_upct = temp;
+
rc = fg_parse_slope_limit_coefficients(chip);
if (rc < 0)
pr_err("Error in parsing slope limit coeffs, rc=%d\n", rc);
@@ -5047,6 +5201,7 @@ static int fg_parse_dt(struct fg_chip *chip)
static void fg_cleanup(struct fg_chip *chip)
{
+ alarm_try_to_cancel(&chip->esr_filter_alarm);
power_supply_unreg_notifier(&chip->nb);
debugfs_remove_recursive(chip->dfs_root);
if (chip->awake_votable)
@@ -5159,6 +5314,9 @@ static int fg_gen3_probe(struct platform_device *pdev)
INIT_WORK(&chip->status_change_work, status_change_work);
INIT_DELAYED_WORK(&chip->ttf_work, ttf_work);
INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
+ INIT_WORK(&chip->esr_filter_work, esr_filter_work);
+ alarm_init(&chip->esr_filter_alarm, ALARM_BOOTTIME,
+ fg_esr_filter_alarm_cb);
rc = fg_memif_init(chip);
if (rc < 0) {
@@ -5230,7 +5388,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
if (!rc) {
pr_info("battery SOC:%d voltage: %duV temp: %d id: %dKOhms\n",
msoc, volt_uv, batt_temp, chip->batt_id_ohms / 1000);
- rc = fg_esr_filter_config(chip, batt_temp);
+ rc = fg_esr_filter_config(chip, batt_temp, false);
if (rc < 0)
pr_err("Error in configuring ESR filter rc:%d\n", rc);
}
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 7acf5fab573b..6d3316b934de 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -669,6 +669,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
+ vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -1451,6 +1452,8 @@ int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
rc = _smblib_vbus_regulator_enable(rdev);
if (rc >= 0)
chg->otg_en = true;
+ else
+ vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
unlock:
mutex_unlock(&chg->otg_oc_lock);
@@ -3980,6 +3983,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+ vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
chg->vconn_attempts = 0;
chg->otg_attempts = 0;
chg->pulse_cnt = 0;
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index 2f8833539cb4..ce41ad97bda1 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -1417,6 +1417,7 @@ static enum power_supply_property smb1351_parallel_properties[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_PARALLEL_MODE,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_MODEL_NAME,
};
static int smb1351_parallel_set_chg_suspend(struct smb1351_charger *chip,
@@ -1711,6 +1712,9 @@ static int smb1351_parallel_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
val->intval = chip->parallel_charger_suspended;
break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = "smb1351";
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index 5e808150a3dd..8edb8a61795a 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1324,6 +1324,15 @@ static int _pwm_enable(struct qpnp_pwm_chip *chip)
chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) ||
chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) {
rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE);
+ if (rc) {
+ pr_err("Failed to enable PWM mode, rc=%d\n", rc);
+ return rc;
+ }
+ rc = qpnp_lpg_glitch_removal(chip, true);
+ if (rc) {
+ pr_err("Failed to enable glitch removal, rc=%d\n", rc);
+ return rc;
+ }
} else if (!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)) {
rc = qpnp_lpg_configure_lut_state(chip, QPNP_LUT_ENABLE);
}
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 28a2619b965d..0781c7154de6 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -449,7 +449,10 @@ static const struct i2c_device_id fan53555_id[] = {
.name = "fan53555",
.driver_data = FAN53555_VENDOR_FAIRCHILD
}, {
- .name = "syr82x",
+ .name = "syr827",
+ .driver_data = FAN53555_VENDOR_SILERGY
+ }, {
+ .name = "syr828",
.driver_data = FAN53555_VENDOR_SILERGY
}, {
.name = "hl7509",
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 84c13dffa3a8..e7a6f1222642 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1635,8 +1635,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* check for for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
- device->discipline->check_attention(device, irb->esw.esw1.lpum);
- dasd_put_device(device);
+ if (!IS_ERR(device)) {
+ device->discipline->check_attention(device,
+ irb->esw.esw1.lpum);
+ dasd_put_device(device);
+ }
}
if (!cqr)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 741f3ee81cfe..5006cb6ce62d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -909,7 +909,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
-int qeth_send_startlan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d10bf3da8e5f..e5b9506698b1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2955,7 +2955,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
-int qeth_send_startlan(struct qeth_card *card)
+static int qeth_send_startlan(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -2968,7 +2968,6 @@ int qeth_send_startlan(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -5080,6 +5079,20 @@ retriable:
goto out;
}
+ rc = qeth_send_startlan(card);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ if (rc == IPA_RC_LAN_OFFLINE) {
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
+ card->lan_online = 0;
+ } else {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else
+ card->lan_online = 1;
+
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5091,14 +5104,14 @@ retriable:
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
goto out;
}
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bf1e0e39334d..58bcb3c9a86a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1203,21 +1203,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
if (qeth_l2_start_ipassists(card))
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 285fe0b2c753..bf3c1b2301db 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3298,21 +3298,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index c00ac4650dce..38c8e308d4c8 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -358,6 +358,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->next_port_scan = jiffies;
+ adapter->erp_action.adapter = adapter;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -514,6 +516,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;
+ port->erp_action.adapter = adapter;
+ port->erp_action.port = port;
+
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7ccfce559034..3b23d6754598 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
- erp_action->sdev = sdev;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != sdev);
if (!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_port(port);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_adapter(adapter);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+ WARN_ON_ONCE(erp_action->port != NULL);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return NULL;
}
- erp_action->adapter = adapter;
+ WARN_ON_ONCE(erp_action->adapter != adapter);
+ memset(&erp_action->list, 0, sizeof(erp_action->list));
+ memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+ erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+ erp_action->fsf_req_id = 0;
erp_action->action = need;
erp_action->status = act_status;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9bd9b9a29dfc..a9b8104b982e 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
+ zfcp_sdev->erp_action.adapter = adapter;
+ zfcp_sdev->erp_action.sdev = sdev;
+
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
+ zfcp_sdev->erp_action.port = port;
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index e4c243748a97..de33801ca31e 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2977,16 +2977,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
return;
BUG_ON(fibptr == NULL);
- dev = fibptr->dev;
-
- scsi_dma_unmap(scsicmd);
- /* expose physical device if expose_physicald flag is on */
- if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
- && expose_physicals > 0)
- aac_expose_phy_device(scsicmd);
+ dev = fibptr->dev;
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
@@ -2999,158 +2994,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
*/
scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
- le32_to_cpu(srbreply->data_xfer_length));
- /*
- * First check the fib status
- */
+ }
- if (le32_to_cpu(srbreply->status) != ST_OK) {
- int len;
- printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_CHECK_CONDITION;
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
- }
+ scsi_dma_unmap(scsicmd);
- /*
- * Next check the srb status
- */
- switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
- case SRB_STATUS_ERROR_RECOVERY:
- case SRB_STATUS_PENDING:
- case SRB_STATUS_SUCCESS:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- break;
- case SRB_STATUS_DATA_OVERRUN:
- switch (scsicmd->cmnd[0]) {
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- case READ_12:
- case WRITE_12:
- case READ_16:
- case WRITE_16:
- if (le32_to_cpu(srbreply->data_xfer_length)
- < scsicmd->underflow)
- printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
- else
- printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- case INQUIRY: {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
- default:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- break;
- }
- break;
- case SRB_STATUS_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
- break;
- case SRB_STATUS_ABORT_FAILED:
- /*
- * Not sure about this one - but assuming the
- * hba was trying to abort for some reason
- */
- scsicmd->result = DID_ERROR << 16 | ABORT << 8;
- break;
- case SRB_STATUS_PARITY_ERROR:
- scsicmd->result = DID_PARITY << 16
- | MSG_PARITY_ERROR << 8;
- break;
- case SRB_STATUS_NO_DEVICE:
- case SRB_STATUS_INVALID_PATH_ID:
- case SRB_STATUS_INVALID_TARGET_ID:
- case SRB_STATUS_INVALID_LUN:
- case SRB_STATUS_SELECTION_TIMEOUT:
- scsicmd->result = DID_NO_CONNECT << 16
- | COMMAND_COMPLETE << 8;
- break;
+ /* expose physical device if expose_physicald flag is on */
+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
- case SRB_STATUS_COMMAND_TIMEOUT:
- case SRB_STATUS_TIMEOUT:
- scsicmd->result = DID_TIME_OUT << 16
- | COMMAND_COMPLETE << 8;
- break;
+ /*
+ * First check the fib status
+ */
- case SRB_STATUS_BUSY:
- scsicmd->result = DID_BUS_BUSY << 16
- | COMMAND_COMPLETE << 8;
- break;
+ if (le32_to_cpu(srbreply->status) != ST_OK) {
+ int len;
- case SRB_STATUS_BUS_RESET:
- scsicmd->result = DID_RESET << 16
- | COMMAND_COMPLETE << 8;
- break;
+ pr_warn("aac_srb_callback: srb failed, status = %d\n",
+ le32_to_cpu(srbreply->status));
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8
+ | SAM_STAT_CHECK_CONDITION;
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
+ }
- case SRB_STATUS_MESSAGE_REJECTED:
+ /*
+ * Next check the srb status
+ */
+ switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
+ case SRB_STATUS_ERROR_RECOVERY:
+ case SRB_STATUS_PENDING:
+ case SRB_STATUS_SUCCESS:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SRB_STATUS_DATA_OVERRUN:
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_16:
+ case WRITE_16:
+ if (le32_to_cpu(srbreply->data_xfer_length)
+ < scsicmd->underflow)
+ pr_warn("aacraid: SCSI CMD underflow\n");
+ else
+ pr_warn("aacraid: SCSI CMD Data Overrun\n");
scsicmd->result = DID_ERROR << 16
- | MESSAGE_REJECT << 8;
+ | COMMAND_COMPLETE << 8;
+ break;
+ case INQUIRY:
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
break;
- case SRB_STATUS_REQUEST_FLUSHED:
- case SRB_STATUS_ERROR:
- case SRB_STATUS_INVALID_REQUEST:
- case SRB_STATUS_REQUEST_SENSE_FAILED:
- case SRB_STATUS_NO_HBA:
- case SRB_STATUS_UNEXPECTED_BUS_FREE:
- case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
- case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
- case SRB_STATUS_DELAYED_RETRY:
- case SRB_STATUS_BAD_FUNCTION:
- case SRB_STATUS_NOT_STARTED:
- case SRB_STATUS_NOT_IN_USE:
- case SRB_STATUS_FORCE_ABORT:
- case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
default:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ }
+ break;
+ case SRB_STATUS_ABORTED:
+ scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_ABORT_FAILED:
+ /*
+ * Not sure about this one - but assuming the
+ * hba was trying to abort for some reason
+ */
+ scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_PARITY_ERROR:
+ scsicmd->result = DID_PARITY << 16
+ | MSG_PARITY_ERROR << 8;
+ break;
+ case SRB_STATUS_NO_DEVICE:
+ case SRB_STATUS_INVALID_PATH_ID:
+ case SRB_STATUS_INVALID_TARGET_ID:
+ case SRB_STATUS_INVALID_LUN:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ scsicmd->result = DID_NO_CONNECT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_COMMAND_TIMEOUT:
+ case SRB_STATUS_TIMEOUT:
+ scsicmd->result = DID_TIME_OUT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_BUSY:
+ scsicmd->result = DID_BUS_BUSY << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_BUS_RESET:
+ scsicmd->result = DID_RESET << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+
+ case SRB_STATUS_MESSAGE_REJECTED:
+ scsicmd->result = DID_ERROR << 16
+ | MESSAGE_REJECT << 8;
+ break;
+ case SRB_STATUS_REQUEST_FLUSHED:
+ case SRB_STATUS_ERROR:
+ case SRB_STATUS_INVALID_REQUEST:
+ case SRB_STATUS_REQUEST_SENSE_FAILED:
+ case SRB_STATUS_NO_HBA:
+ case SRB_STATUS_UNEXPECTED_BUS_FREE:
+ case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+ case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+ case SRB_STATUS_DELAYED_RETRY:
+ case SRB_STATUS_BAD_FUNCTION:
+ case SRB_STATUS_NOT_STARTED:
+ case SRB_STATUS_NOT_IN_USE:
+ case SRB_STATUS_FORCE_ABORT:
+ case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+ default:
#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
- le32_to_cpu(srbreply->srb_status) & 0x3F,
- aac_get_status_string(
- le32_to_cpu(srbreply->srb_status) & 0x3F),
- scsicmd->cmnd[0],
- le32_to_cpu(srbreply->scsi_status));
+ pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
+ le32_to_cpu(srbreply->srb_status) & 0x3F,
+ aac_get_status_string(
+ le32_to_cpu(srbreply->srb_status) & 0x3F),
+ scsicmd->cmnd[0],
+ le32_to_cpu(srbreply->scsi_status));
#endif
- if ((scsicmd->cmnd[0] == ATA_12)
- || (scsicmd->cmnd[0] == ATA_16)) {
- if (scsicmd->cmnd[2] & (0x01 << 5)) {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- } else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
+ /*
+ * When the CC bit is SET by the host in ATA pass thru CDB,
+ * driver is supposed to return DID_OK
+ *
+ * When the CC bit is RESET by the host, driver should
+ * return DID_ERROR
+ */
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
} else {
scsicmd->result = DID_ERROR << 16
| COMMAND_COMPLETE << 8;
- break;
+ break;
}
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
}
- if (le32_to_cpu(srbreply->scsi_status)
- == SAM_STAT_CHECK_CONDITION) {
- int len;
+ }
+ if (le32_to_cpu(srbreply->scsi_status)
+ == SAM_STAT_CHECK_CONDITION) {
+ int len;
- scsicmd->result |= SAM_STAT_CHECK_CONDITION;
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
+ scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
- le32_to_cpu(srbreply->status), len);
+ pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
#endif
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
- }
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
}
+
/*
* OR in the scsi status (already shifted up a bit)
*/
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f6446d759d7f..4639dac64e7f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5148,6 +5148,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
*/
/**
+ * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+
+ lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+ sizeof fc_host_symbolic_name(shost));
+}
+
+/**
* lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
* @shost: kernel scsi host pointer.
**/
@@ -5684,6 +5697,8 @@ struct fc_function_template lpfc_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
+
+ .get_host_symbolic_name = lpfc_get_host_symbolic_name,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
@@ -5751,6 +5766,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
+
+ .get_host_symbolic_name = lpfc_get_host_symbolic_name,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c74f74ab981c..d278362448ca 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1982,6 +1982,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ sp->cmn.valid_vendor_ver_level = 0;
+ memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
did, 0, 0);
@@ -3966,6 +3969,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
} else {
memcpy(pcmd, &vport->fc_sparam,
sizeof(struct serv_parm));
+
+ sp->cmn.valid_vendor_ver_level = 0;
+ memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 2cce88e967ce..a8ad97300177 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -360,6 +360,12 @@ struct csp {
* Word 1 Bit 30 in PLOGI request is random offset
*/
#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
+/*
+ * Word 1 Bit 29 in common service parameter is overloaded.
+ * Word 1 Bit 29 in FLOGI response is multiple NPort assignment
+ * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level
+ */
+#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 38e90d9c2ced..8379fbbc60db 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -118,6 +118,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+ /* ensure WQE bcopy flushed before doorbell write */
+ wmb();
/* Update the host index before invoking device */
host_index = q->host_index;
@@ -9805,6 +9807,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ abtsiocbp->vport = vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort xri x%x, original iotag x%x, "
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 769012663a8f..861c57bc4520 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -528,6 +528,12 @@ enable_vport(struct fc_vport *fc_vport)
spin_lock_irq(shost->host_lock);
vport->load_flag |= FC_LOADING;
+ if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+ spin_unlock_irq(shost->host_lock);
+ lpfc_issue_init_vpi(vport);
+ goto out;
+ }
+
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
@@ -548,6 +554,8 @@ enable_vport(struct fc_vport *fc_vport)
} else {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
}
+
+out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1827 Vport Enabled.\n");
return VPORT_OK;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index bb6518159d12..55189ce57411 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -848,7 +848,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val > SG_MAX_QUEUE)
+ if (val >= SG_MAX_QUEUE)
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index f429547aef7b..348678218e7f 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2838,6 +2838,7 @@ static const struct of_device_id ufs_qcom_of_match[] = {
{ .compatible = "qcom,ufshc"},
{},
};
+MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
static const struct dev_pm_ops ufs_qcom_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 7fd8ffe17707..8ee607606866 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -5980,7 +5980,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
out:
ufshcd_scsi_unblock_requests(hba);
- pm_runtime_put_sync(hba->dev);
+ pm_runtime_put(hba->dev);
return;
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 5a7cf839b4fd..2ef26f880d47 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1074,7 +1074,6 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba)
BUG_ON(!hba);
return hba->priv;
}
-
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index b8464fdfd310..f21e9c4c4f4e 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2991,7 +2991,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!wait_for_completion_timeout(
&ctx->int_req_ack_complete,
ctx->rx_intent_req_timeout_jiffies)) {
- GLINK_ERR_CH(ctx,
+ GLINK_ERR(
"%s: Intent request ack with size: %zu not granted for lcid\n",
__func__, size);
ret = -ETIMEDOUT;
@@ -3011,7 +3011,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!wait_for_completion_timeout(
&ctx->int_req_complete,
ctx->rx_intent_req_timeout_jiffies)) {
- GLINK_ERR_CH(ctx,
+ GLINK_ERR(
"%s: Intent request with size: %zu not granted for lcid\n",
__func__, size);
ret = -ETIMEDOUT;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index f2c273b0f4e0..a678e03235c0 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -1982,6 +1982,7 @@ static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
/* Need enough space to write the command and some data */
if (size <= sizeof(cmd)) {
einfo->tx_resume_needed = true;
+ send_tx_blocked_signal(einfo);
spin_unlock_irqrestore(&einfo->write_lock, flags);
srcu_read_unlock(&einfo->use_ref, rcu_id);
return -EAGAIN;
diff --git a/drivers/soc/qcom/hab/Makefile b/drivers/soc/qcom/hab/Makefile
index 83fc54d42202..77825be16fc4 100644
--- a/drivers/soc/qcom/hab/Makefile
+++ b/drivers/soc/qcom/hab/Makefile
@@ -9,6 +9,7 @@ msm_hab-objs = \
hab_mem_linux.o \
hab_pipe.o \
qvm_comm.o \
- hab_qvm.o
+ hab_qvm.o \
+ hab_parser.o
obj-$(CONFIG_MSM_HAB) += msm_hab.o
diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c
index c6df36f5c0a2..040730d63a83 100644
--- a/drivers/soc/qcom/hab/hab.c
+++ b/drivers/soc/qcom/hab/hab.c
@@ -21,25 +21,32 @@
.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
}
-/* the following has to match habmm definitions, order does not matter */
+/*
+ * The following has to match habmm definitions, order does not matter if
+ * hab config does not care either. When hab config is not present, the default
+ * is as guest VM all pchans are pchan opener (FE)
+ */
static struct hab_device hab_devices[] = {
HAB_DEVICE_CNSTR(DEVICE_AUD1_NAME, MM_AUD_1, 0),
HAB_DEVICE_CNSTR(DEVICE_AUD2_NAME, MM_AUD_2, 1),
HAB_DEVICE_CNSTR(DEVICE_AUD3_NAME, MM_AUD_3, 2),
HAB_DEVICE_CNSTR(DEVICE_AUD4_NAME, MM_AUD_4, 3),
- HAB_DEVICE_CNSTR(DEVICE_CAM_NAME, MM_CAM, 4),
- HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 5),
- HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 6),
- HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 7),
- HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 8),
- HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 9),
- HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 10),
- HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 11),
- HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 12),
- HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 13),
- HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 14),
- HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 15),
- HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 16)
+ HAB_DEVICE_CNSTR(DEVICE_CAM1_NAME, MM_CAM_1, 4),
+ HAB_DEVICE_CNSTR(DEVICE_CAM2_NAME, MM_CAM_2, 5),
+ HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 6),
+ HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 7),
+ HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 8),
+ HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 9),
+ HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 10),
+ HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 11),
+ HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 12),
+ HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 13),
+ HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 14),
+ HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 15),
+ HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 16),
+ HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 17),
+ HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 18),
+ HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 19),
};
struct hab_driver hab_driver = {
@@ -71,6 +78,7 @@ struct uhab_context *hab_ctx_alloc(int kernel)
kref_init(&ctx->refcount);
ctx->import_ctx = habmem_imp_hyp_open();
if (!ctx->import_ctx) {
+ pr_err("habmem_imp_hyp_open failed\n");
kfree(ctx);
return NULL;
}
@@ -148,6 +156,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
dev = find_hab_device(mm_id);
if (dev == NULL) {
+ pr_err("HAB device %d is not initialized\n", mm_id);
ret = -EINVAL;
goto err;
}
@@ -161,6 +170,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
vchan = hab_vchan_alloc(ctx, pchan);
if (!vchan) {
+ pr_err("vchan alloc failed\n");
ret = -ENOMEM;
goto err;
}
@@ -187,6 +197,9 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
vchan->otherend_id = recv_request->vchan_id;
hab_open_request_free(recv_request);
+ vchan->session_id = open_id;
+ pr_debug("vchan->session_id:%d\n", vchan->session_id);
+
/* Send Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan,
0, sub_id, open_id);
@@ -221,6 +234,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
dev = find_hab_device(mm_id);
if (dev == NULL) {
+ pr_err("failed to find dev based on id %d\n", mm_id);
ret = -EINVAL;
goto err;
}
@@ -249,6 +263,9 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
vchan->otherend_id = otherend_vchan_id;
+ vchan->session_id = open_id;
+ pr_debug("vchan->session_id:%d\n", vchan->session_id);
+
/* Send Init-Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
pchan, vchan->id, sub_id, open_id);
@@ -259,7 +276,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
/* Wait for Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK,
pchan, 0, sub_id, open_id);
- ret = hab_open_listen(ctx, dev, &request, &recv_request, HZ);
+ ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
if (ret != -EAGAIN)
break;
@@ -280,6 +297,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
hab_pchan_put(pchan);
return vchan;
err:
+ pr_err("listen on mmid %d failed\n", mm_id);
if (vchan)
hab_vchan_put(vchan);
if (pchan)
@@ -304,12 +322,19 @@ long hab_vchan_send(struct uhab_context *ctx,
}
vchan = hab_get_vchan_fromvcid(vcid, ctx);
- if (!vchan || vchan->otherend_closed)
- return -ENODEV;
+ if (!vchan || vchan->otherend_closed) {
+ ret = -ENODEV;
+ goto err;
+ }
HAB_HEADER_SET_SIZE(header, sizebytes);
- HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
+ if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT)
+ HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE);
+ else
+ HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
+
HAB_HEADER_SET_ID(header, vchan->otherend_id);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
while (1) {
ret = physical_channel_send(vchan->pchan, &header, data);
@@ -321,7 +346,11 @@ long hab_vchan_send(struct uhab_context *ctx,
schedule();
}
- hab_vchan_put(vchan);
+
+err:
+ if (vchan)
+ hab_vchan_put(vchan);
+
return ret;
}
@@ -335,7 +364,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;
vchan = hab_get_vchan_fromvcid(vcid, ctx);
- if (!vchan || vchan->otherend_closed)
+ if (!vchan)
return ERR_PTR(-ENODEV);
if (nonblocking_flag) {
@@ -351,6 +380,8 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
if (!message) {
if (nonblocking_flag)
ret = -EAGAIN;
+ else if (vchan->otherend_closed)
+ ret = -ENODEV;
else
ret = -EPIPE;
}
@@ -369,7 +400,11 @@ int hab_vchan_open(struct uhab_context *ctx,
int32_t *vcid,
uint32_t flags)
{
- struct virtual_channel *vchan;
+ struct virtual_channel *vchan = NULL;
+ struct hab_device *dev;
+
+ pr_debug("Open mmid=%d, loopback mode=%d, loopback num=%d\n",
+ mmid, hab_driver.b_loopback, hab_driver.loopback_num);
if (!vcid)
return -EINVAL;
@@ -383,14 +418,29 @@ int hab_vchan_open(struct uhab_context *ctx,
vchan = frontend_open(ctx, mmid, LOOPBACK_DOM);
}
} else {
- if (hab_driver.b_server_dom)
- vchan = backend_listen(ctx, mmid);
- else
- vchan = frontend_open(ctx, mmid, 0);
+ dev = find_hab_device(mmid);
+
+ if (dev) {
+ struct physical_channel *pchan =
+ hab_pchan_find_domid(dev, HABCFG_VMID_DONT_CARE);
+
+ if (pchan->is_be)
+ vchan = backend_listen(ctx, mmid);
+ else
+ vchan = frontend_open(ctx, mmid,
+ HABCFG_VMID_DONT_CARE);
+ } else {
+ pr_err("failed to find device, mmid %d\n", mmid);
+ }
}
- if (IS_ERR(vchan))
+ if (IS_ERR(vchan)) {
+ pr_err("vchan open failed over mmid=%d\n", mmid);
return PTR_ERR(vchan);
+ }
+
+ pr_debug("vchan id %x, remote id %x\n",
+ vchan->id, vchan->otherend_id);
write_lock(&ctx->ctx_lock);
list_add_tail(&vchan->node, &ctx->vchannels);
@@ -403,12 +453,13 @@ int hab_vchan_open(struct uhab_context *ctx,
void hab_send_close_msg(struct virtual_channel *vchan)
{
- struct hab_header header;
+ struct hab_header header = {0};
if (vchan && !vchan->otherend_closed) {
HAB_HEADER_SET_SIZE(header, 0);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
physical_channel_send(vchan->pchan, &header, NULL);
}
}
@@ -442,6 +493,220 @@ void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
write_unlock(&ctx->ctx_lock);
}
+/*
+ * To name the pchan - the pchan has two ends, either FE or BE locally.
+ * if is_be is true, then this is listener for BE. pchane name use remote
+ * FF's vmid from the table.
+ * if is_be is false, then local is FE as opener. pchan name use local FE's
+ * vmid (self)
+ */
+static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
+ int vmid_local, int vmid_remote, int is_be)
+{
+ char pchan_name[MAX_VMID_NAME_SIZE];
+ struct physical_channel *pchan = NULL;
+ int ret;
+ int vmid = is_be ? vmid_remote : vmid_local;
+
+ if (!mmid_device) {
+ pr_err("habdev %pK, vmid local %d, remote %d, is be %d\n",
+ mmid_device, vmid_local, vmid_remote, is_be);
+ return -EINVAL;
+ }
+
+ snprintf(pchan_name, MAX_VMID_NAME_SIZE, "vm%d-", vmid);
+ strlcat(pchan_name, mmid_device->name, MAX_VMID_NAME_SIZE);
+
+ ret = habhyp_commdev_alloc((void **)&pchan, is_be, pchan_name,
+ vmid_remote, mmid_device);
+ if (ret == 0) {
+ pr_debug("pchan %s added, vmid local %d, remote %d, is_be %d, total %d\n",
+ pchan_name, vmid_local, vmid_remote, is_be,
+ mmid_device->pchan_cnt);
+ } else {
+ pr_err("failed %d to allocate pchan %s, vmid local %d, remote %d, is_be %d, total %d\n",
+ ret, pchan_name, vmid_local, vmid_remote,
+ is_be, mmid_device->pchan_cnt);
+ }
+
+ return ret;
+}
+
+static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
+{
+ int k, ret = 0;
+
+ pr_debug("%d as mmid %d in vmid %d\n",
+ HABCFG_GET_MMID(settings, i, j), j, i);
+
+ switch (HABCFG_GET_MMID(settings, i, j)) {
+ case MM_AUD_START/100:
+ for (k = MM_AUD_START + 1; k < MM_AUD_END; k++) {
+ /*
+ * if this local pchan end is BE, then use
+ * remote FE's vmid. If local end is FE, then
+ * use self vmid
+ */
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ case MM_CAM_START/100:
+ for (k = MM_CAM_START + 1; k < MM_CAM_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ case MM_DISP_START/100:
+ for (k = MM_DISP_START + 1; k < MM_DISP_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ case MM_GFX_START/100:
+ for (k = MM_GFX_START + 1; k < MM_GFX_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ case MM_VID_START/100:
+ for (k = MM_VID_START + 1; k < MM_VID_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ case MM_MISC_START/100:
+ for (k = MM_MISC_START + 1; k < MM_MISC_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ case MM_QCPE_START/100:
+ for (k = MM_QCPE_START + 1; k < MM_QCPE_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ case MM_CLK_START/100:
+ for (k = MM_CLK_START + 1; k < MM_CLK_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+
+ default:
+ pr_err("failed to find mmid %d, i %d, j %d\n",
+ HABCFG_GET_MMID(settings, i, j), i, j);
+
+ break;
+ }
+}
+
+/*
+ * generate pchan list based on hab settings table.
+ * return status 0: success, otherwise failure
+ */
+static int hab_generate_pchan_list(struct local_vmid *settings)
+{
+ int i, j;
+
+ /* scan by valid VMs, then mmid */
+ pr_debug("self vmid is %d\n", settings->self);
+ for (i = 0; i < HABCFG_VMID_MAX; i++) {
+ if (HABCFG_GET_VMID(settings, i) != HABCFG_VMID_INVALID &&
+ HABCFG_GET_VMID(settings, i) != settings->self) {
+ pr_debug("create pchans for vm %d\n", i);
+
+ for (j = 1; j <= HABCFG_MMID_AREA_MAX; j++) {
+ if (HABCFG_GET_MMID(settings, i, j)
+ != HABCFG_VMID_INVALID)
+ hab_generate_pchan(settings, i, j);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function checks hypervisor plug-in readiness, read in hab configs,
+ * and configure pchans
+ */
+int do_hab_parse(void)
+{
+ int result;
+ int i;
+ struct hab_device *device;
+ int pchan_total = 0;
+
+ /* first check if hypervisor plug-in is ready */
+ result = hab_hypervisor_register();
+ if (result) {
+ pr_err("register HYP plug-in failed, ret %d\n", result);
+ return result;
+ }
+
+ /* Initialize open Q before first pchan starts */
+ for (i = 0; i < hab_driver.ndevices; i++) {
+ device = &hab_driver.devp[i];
+ init_waitqueue_head(&device->openq);
+ }
+
+ /* read in hab config and create pchans*/
+ memset(&hab_driver.settings, HABCFG_VMID_INVALID,
+ sizeof(hab_driver.settings));
+
+ pr_debug("prepare default gvm 2 settings...\n");
+ fill_default_gvm_settings(&hab_driver.settings, 2,
+ MM_AUD_START, MM_ID_MAX);
+
+ /* now generate hab pchan list */
+ result = hab_generate_pchan_list(&hab_driver.settings);
+ if (result) {
+ pr_err("generate pchan list failed, ret %d\n", result);
+ } else {
+ for (i = 0; i < hab_driver.ndevices; i++) {
+ device = &hab_driver.devp[i];
+ pchan_total += device->pchan_cnt;
+ }
+ pr_debug("ret %d, total %d pchans added, ndevices %d\n",
+ result, pchan_total, hab_driver.ndevices);
+ }
+
+ return result;
+}
+
static int hab_open(struct inode *inodep, struct file *filep)
{
int result = 0;
@@ -468,6 +733,8 @@ static int hab_release(struct inode *inodep, struct file *filep)
if (!ctx)
return 0;
+ pr_debug("inode %pK, filep %pK\n", inodep, filep);
+
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
@@ -635,9 +902,7 @@ static const struct dma_map_ops hab_dma_ops = {
static int __init hab_init(void)
{
int result;
- int i;
dev_t dev;
- struct hab_device *device;
result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");
@@ -676,24 +941,22 @@ static int __init hab_init(void)
goto err;
}
- for (i = 0; i < hab_driver.ndevices; i++) {
- device = &hab_driver.devp[i];
- init_waitqueue_head(&device->openq);
- }
-
- hab_hypervisor_register();
+ /* read in hab config, then configure pchans */
+ result = do_hab_parse();
- hab_driver.kctx = hab_ctx_alloc(1);
- if (!hab_driver.kctx) {
- pr_err("hab_ctx_alloc failed");
- result = -ENOMEM;
- hab_hypervisor_unregister();
- goto err;
- }
+ if (!result) {
+ hab_driver.kctx = hab_ctx_alloc(1);
+ if (!hab_driver.kctx) {
+ pr_err("hab_ctx_alloc failed");
+ result = -ENOMEM;
+ hab_hypervisor_unregister();
+ goto err;
+ }
- set_dma_ops(hab_driver.dev, &hab_dma_ops);
+ set_dma_ops(hab_driver.dev, &hab_dma_ops);
- return result;
+ return result;
+ }
err:
if (!IS_ERR_OR_NULL(hab_driver.dev))
@@ -703,6 +966,7 @@ err:
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
+ pr_err("Error in hab init, result %d\n", result);
return result;
}
diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h
index 805e5b4a7008..19a8584edd35 100644
--- a/drivers/soc/qcom/hab/hab.h
+++ b/drivers/soc/qcom/hab/hab.h
@@ -13,7 +13,7 @@
#ifndef __HAB_H
#define __HAB_H
-#define pr_fmt(fmt) "hab: " fmt
+#define pr_fmt(fmt) "|hab:%s:%d|" fmt, __func__, __LINE__
#include <linux/types.h>
@@ -47,6 +47,7 @@ enum hab_payload_type {
HAB_PAYLOAD_TYPE_EXPORT_ACK,
HAB_PAYLOAD_TYPE_PROFILE,
HAB_PAYLOAD_TYPE_CLOSE,
+ HAB_PAYLOAD_TYPE_MAX,
};
#define LOOPBACK_DOM 0xFF
@@ -61,7 +62,8 @@ enum hab_payload_type {
#define DEVICE_AUD2_NAME "hab_aud2"
#define DEVICE_AUD3_NAME "hab_aud3"
#define DEVICE_AUD4_NAME "hab_aud4"
-#define DEVICE_CAM_NAME "hab_cam"
+#define DEVICE_CAM1_NAME "hab_cam1"
+#define DEVICE_CAM2_NAME "hab_cam2"
#define DEVICE_DISP1_NAME "hab_disp1"
#define DEVICE_DISP2_NAME "hab_disp2"
#define DEVICE_DISP3_NAME "hab_disp3"
@@ -74,6 +76,48 @@ enum hab_payload_type {
#define DEVICE_QCPE2_NAME "hab_qcpe_vm2"
#define DEVICE_QCPE3_NAME "hab_qcpe_vm3"
#define DEVICE_QCPE4_NAME "hab_qcpe_vm4"
+#define DEVICE_CLK1_NAME "hab_clock_vm1"
+#define DEVICE_CLK2_NAME "hab_clock_vm2"
+
+/* make sure concascaded name is less than this value */
+#define MAX_VMID_NAME_SIZE 30
+
+#define HABCFG_FILE_SIZE_MAX 256
+#define HABCFG_MMID_AREA_MAX (MM_ID_MAX/100)
+
+#define HABCFG_VMID_MAX 16
+#define HABCFG_VMID_INVALID (-1)
+#define HABCFG_VMID_DONT_CARE (-2)
+
+#define HABCFG_ID_LINE_LIMIT ","
+#define HABCFG_ID_VMID "VMID="
+#define HABCFG_ID_BE "BE="
+#define HABCFG_ID_FE "FE="
+#define HABCFG_ID_MMID "MMID="
+#define HABCFG_ID_RANGE "-"
+#define HABCFG_ID_DONTCARE "X"
+
+#define HABCFG_FOUND_VMID 1
+#define HABCFG_FOUND_FE_MMIDS 2
+#define HABCFG_FOUND_BE_MMIDS 3
+#define HABCFG_FOUND_NOTHING (-1)
+
+#define HABCFG_BE_FALSE 0
+#define HABCFG_BE_TRUE 1
+
+#define HABCFG_GET_VMID(_local_cfg_, _vmid_) \
+ ((settings)->vmid_mmid_list[_vmid_].vmid)
+#define HABCFG_GET_MMID(_local_cfg_, _vmid_, _mmid_) \
+ ((settings)->vmid_mmid_list[_vmid_].mmid[_mmid_])
+#define HABCFG_GET_BE(_local_cfg_, _vmid_, _mmid_) \
+ ((settings)->vmid_mmid_list[_vmid_].is_listener[_mmid_])
+
+struct hab_header {
+ uint32_t id_type_size;
+ uint32_t session_id;
+ uint32_t signature;
+ uint32_t sequence;
+} __packed;
/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_SIZE_SHIFT 0
@@ -96,34 +140,44 @@ enum hab_payload_type {
#define HAB_VCID_GET_ID(vcid) \
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
+
+#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid))
+
#define HAB_HEADER_SET_SIZE(header, size) \
- ((header).info = (((header).info) & (~HAB_HEADER_SIZE_MASK)) | \
- (((size) << HAB_HEADER_SIZE_SHIFT) & HAB_HEADER_SIZE_MASK))
+ ((header).id_type_size = ((header).id_type_size & \
+ (~HAB_HEADER_SIZE_MASK)) | \
+ (((size) << HAB_HEADER_SIZE_SHIFT) & \
+ HAB_HEADER_SIZE_MASK))
#define HAB_HEADER_SET_TYPE(header, type) \
- ((header).info = (((header).info) & (~HAB_HEADER_TYPE_MASK)) | \
- (((type) << HAB_HEADER_TYPE_SHIFT) & HAB_HEADER_TYPE_MASK))
+ ((header).id_type_size = ((header).id_type_size & \
+ (~HAB_HEADER_TYPE_MASK)) | \
+ (((type) << HAB_HEADER_TYPE_SHIFT) & \
+ HAB_HEADER_TYPE_MASK))
#define HAB_HEADER_SET_ID(header, id) \
- ((header).info = (((header).info) & (~HAB_HEADER_ID_MASK)) | \
- ((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) \
- & HAB_HEADER_ID_MASK))
+ ((header).id_type_size = ((header).id_type_size & \
+ (~HAB_HEADER_ID_MASK)) | \
+ ((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) & \
+ HAB_HEADER_ID_MASK))
#define HAB_HEADER_GET_SIZE(header) \
- ((((header).info) & HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)
+ (((header).id_type_size & \
+ HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)
#define HAB_HEADER_GET_TYPE(header) \
- ((((header).info) & HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
+ (((header).id_type_size & \
+ HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
#define HAB_HEADER_GET_ID(header) \
- (((((header).info) & HAB_HEADER_ID_MASK) >> \
+ ((((header).id_type_size & HAB_HEADER_ID_MASK) >> \
(HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK)
-struct hab_header {
- uint32_t info;
-};
+#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
struct physical_channel {
+ char name[MAX_VMID_NAME_SIZE];
+ int is_be;
struct kref refcount;
struct hab_device *habdev;
struct list_head node;
@@ -138,6 +192,10 @@ struct physical_channel {
int closed;
spinlock_t rxbuf_lock;
+
+ /* vchans over this pchan */
+ struct list_head vchannels;
+ rwlock_t vchans_lock;
};
struct hab_open_send_data {
@@ -179,9 +237,10 @@ struct hab_message {
};
struct hab_device {
- const char *name;
+ char name[MAX_VMID_NAME_SIZE];
unsigned int id;
struct list_head pchannels;
+ int pchan_cnt;
struct mutex pchan_lock;
struct list_head openq_list;
spinlock_t openlock;
@@ -211,19 +270,37 @@ struct uhab_context {
int kernel;
};
+/*
+ * array to describe the VM and its MMID configuration as what is connected to
+ * so this is describing a pchan's remote side
+ */
+struct vmid_mmid_desc {
+ int vmid; /* remote vmid */
+ int mmid[HABCFG_MMID_AREA_MAX+1]; /* selected or not */
+ int is_listener[HABCFG_MMID_AREA_MAX+1]; /* yes or no */
+};
+
+struct local_vmid {
+ int32_t self; /* only this field is for local */
+ struct vmid_mmid_desc vmid_mmid_list[HABCFG_VMID_MAX];
+};
+
struct hab_driver {
struct device *dev;
struct cdev cdev;
dev_t major;
struct class *class;
- int irq;
-
int ndevices;
struct hab_device *devp;
struct uhab_context *kctx;
+
+ struct local_vmid settings; /* parser results */
+
int b_server_dom;
int loopback_num;
int b_loopback;
+
+ void *hyp_priv; /* hypervisor plug-in storage */
};
struct virtual_channel {
@@ -243,12 +320,14 @@ struct virtual_channel {
struct physical_channel *pchan;
struct uhab_context *ctx;
struct list_head node;
+ struct list_head pnode;
struct list_head rx_list;
wait_queue_head_t rx_queue;
spinlock_t rx_lock;
int id;
int otherend_id;
int otherend_closed;
+ uint32_t session_id;
};
/*
@@ -271,7 +350,7 @@ struct export_desc {
void *kva;
int payload_count;
unsigned char payload[1];
-};
+} __packed;
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid, int32_t *vcid, uint32_t flags);
@@ -286,6 +365,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
int vcid,
unsigned int flags);
void hab_vchan_stop(struct virtual_channel *vchan);
+void hab_vchans_stop(struct physical_channel *pchan);
void hab_vchan_stop_notify(struct virtual_channel *vchan);
int hab_mem_export(struct uhab_context *ctx,
@@ -350,7 +430,7 @@ void hab_open_request_init(struct hab_open_request *request,
int open_id);
int hab_open_request_send(struct hab_open_request *request);
int hab_open_request_add(struct physical_channel *pchan,
- struct hab_header *header);
+ size_t sizebytes, int request_type);
void hab_open_request_free(struct hab_open_request *request);
int hab_open_listen(struct uhab_context *ctx,
struct hab_device *dev,
@@ -361,7 +441,7 @@ int hab_open_listen(struct uhab_context *ctx,
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
struct physical_channel *pchan);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
- uint32_t vchan_id);
+ struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
@@ -394,6 +474,9 @@ static inline void hab_ctx_put(struct uhab_context *ctx)
void hab_send_close_msg(struct virtual_channel *vchan);
int hab_hypervisor_register(void);
void hab_hypervisor_unregister(void);
+int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
+ int vmid_remote, struct hab_device *mmid_device);
+int habhyp_commdev_dealloc(void *commdev);
int physical_channel_read(struct physical_channel *pchan,
void *payload,
@@ -407,6 +490,13 @@ void physical_channel_rx_dispatch(unsigned long physical_channel);
int loopback_pchan_create(char *dev_name);
+int hab_parse(struct local_vmid *settings);
+
+int do_hab_parse(void);
+
+int fill_default_gvm_settings(struct local_vmid *settings,
+ int vmid_local, int mmid_start, int mmid_end);
+
bool hab_is_loopback(void);
/* Global singleton HAB instance */
diff --git a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c
index ab4b9d0885cb..ecc3f52a6662 100644
--- a/drivers/soc/qcom/hab/hab_mem_linux.c
+++ b/drivers/soc/qcom/hab/hab_mem_linux.c
@@ -35,6 +35,7 @@ struct importer_context {
int cnt; /* pages allocated for local file */
struct list_head imp_list;
struct file *filp;
+ rwlock_t implist_lock;
};
void *habmm_hyp_allocate_grantable(int page_count,
@@ -73,8 +74,12 @@ static int habmem_get_dma_pages(unsigned long address,
int fd;
vma = find_vma(current->mm, address);
- if (!vma || !vma->vm_file)
+ if (!vma || !vma->vm_file) {
+ pr_err("cannot find vma\n");
goto err;
+ }
+
+ pr_debug("vma flags %lx\n", vma->vm_flags);
/* Look for the fd that matches this the vma file */
fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
@@ -103,6 +108,7 @@ static int habmem_get_dma_pages(unsigned long address,
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
page = sg_page(s);
+ pr_debug("sgl length %d\n", s->length);
for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) {
pages[rc] = nth_page(page, j);
@@ -136,6 +142,12 @@ err:
return rc;
}
+/*
+ * exporter - grant & revoke
+ * Generate a shareable page list based on the CPU-friendly virtual "address".
+ * The result, as an array, is stored in ppdata and returned to the caller.
+ * A page size of 4KB is assumed.
+ */
int habmem_hyp_grant_user(unsigned long address,
int page_count,
int flags,
@@ -220,6 +232,7 @@ void *habmem_imp_hyp_open(void)
if (!priv)
return NULL;
+ rwlock_init(&priv->implist_lock);
INIT_LIST_HEAD(&priv->imp_list);
return priv;
@@ -261,7 +274,7 @@ long habmem_imp_hyp_map(void *imp_ctx,
uint32_t userflags)
{
struct page **pages;
- struct compressed_pfns *pfn_table = impdata;
+ struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
@@ -310,6 +323,9 @@ long habmem_imp_hyp_map(void *imp_ctx,
kfree(pglist);
pr_err("%ld pages vmap failed\n", pglist->npages);
return -ENOMEM;
+ } else {
+ pr_debug("%ld pages vmap pass, return %pK\n",
+ pglist->npages, pglist->kva);
}
pglist->uva = NULL;
@@ -320,8 +336,11 @@ long habmem_imp_hyp_map(void *imp_ctx,
pglist->kva = NULL;
}
+ write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
+ write_unlock(&priv->implist_lock);
+ pr_debug("index returned %llx\n", *index);
return 0;
}
@@ -333,11 +352,15 @@ long habmm_imp_hyp_unmap(void *imp_ctx,
int kernel)
{
struct importer_context *priv = imp_ctx;
- struct pages_list *pglist;
+ struct pages_list *pglist, *tmp;
int found = 0;
uint64_t pg_index = index >> PAGE_SHIFT;
- list_for_each_entry(pglist, &priv->imp_list, list) {
+ write_lock(&priv->implist_lock);
+ list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
+ pr_debug("node pglist %pK, kernel %d, pg_index %llx\n",
+ pglist, pglist->kernel, pg_index);
+
if (kernel) {
if (pglist->kva == (void *)((uintptr_t)index))
found = 1;
@@ -353,11 +376,15 @@ long habmm_imp_hyp_unmap(void *imp_ctx,
}
}
+ write_unlock(&priv->implist_lock);
if (!found) {
pr_err("failed to find export id on index %llx\n", index);
return -EINVAL;
}
+ pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n",
+ pglist, pglist->index, pglist->kernel, priv->cnt);
+
if (kernel)
if (pglist->kva)
vunmap(pglist->kva);
@@ -393,6 +420,8 @@ static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ pr_debug("Fault page index %d\n", page_idx);
+
page = pglist->pages[page_idx];
get_page(page);
vmf->page = page;
@@ -422,15 +451,20 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
struct pages_list *pglist;
int bfound = 0;
+ pr_debug("mmap request start %lX, len %ld, index %lX\n",
+ vma->vm_start, length, vma->vm_pgoff);
+
+ read_lock(&imp_ctx->implist_lock);
list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
if (pglist->index == vma->vm_pgoff) {
bfound = 1;
break;
}
}
+ read_unlock(&imp_ctx->implist_lock);
if (!bfound) {
- pr_err("Failed to find pglist vm_pgoff: %d\n", vma->vm_pgoff);
+ pr_err("Failed to find pglist vm_pgoff: %ld\n", vma->vm_pgoff);
return -EINVAL;
}
diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c
index aaef9aa9f414..67601590908e 100644
--- a/drivers/soc/qcom/hab/hab_mimex.c
+++ b/drivers/soc/qcom/hab/hab_mimex.c
@@ -31,11 +31,11 @@ static int hab_export_ack_find(struct uhab_context *ctx,
struct hab_export_ack *expect_ack)
{
int ret = 0;
- struct hab_export_ack_recvd *ack_recvd;
+ struct hab_export_ack_recvd *ack_recvd, *tmp;
spin_lock_bh(&ctx->expq_lock);
- list_for_each_entry(ack_recvd, &ctx->exp_rxq, node) {
+ list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
if (ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
@@ -197,6 +197,7 @@ static int habmem_export_vchan(struct uhab_context *ctx,
HAB_HEADER_SET_SIZE(header, sizebytes);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
ret = physical_channel_send(vchan->pchan, &header, exp);
if (ret != 0) {
@@ -228,6 +229,8 @@ int hab_mem_export(struct uhab_context *ctx,
if (!ctx || !param || param->sizebytes > HAB_MAX_EXPORT_SIZE)
return -EINVAL;
+ pr_debug("vc %X, mem size %d\n", param->vcid, param->sizebytes);
+
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
@@ -303,7 +306,10 @@ int hab_mem_unexport(struct uhab_context *ctx,
return -EINVAL;
ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
-
+ if (ret) {
+ pr_err("Error found in revoke grant with ret %d", ret);
+ return ret;
+ }
habmem_remove_export(exp);
return ret;
}
@@ -335,6 +341,10 @@ int hab_mem_import(struct uhab_context *ctx,
return ret;
}
+ pr_debug("call map id: %d pcnt %d remote_dom %d 1st_ref:0x%X\n",
+ exp->export_id, exp->payload_count, exp->domid_local,
+ *((uint32_t *)exp->payload));
+
ret = habmem_imp_hyp_map(ctx->import_ctx,
exp->payload,
exp->payload_count,
@@ -349,6 +359,8 @@ int hab_mem_import(struct uhab_context *ctx,
exp->domid_local, *((uint32_t *)exp->payload));
return ret;
}
+ pr_debug("import index %llx, kva %llx, kernel %d\n",
+ exp->import_index, param->kva, kernel);
param->index = exp->import_index;
param->kva = (uint64_t)exp->kva;
@@ -373,6 +385,9 @@ int hab_mem_unimport(struct uhab_context *ctx,
list_del(&exp->node);
ctx->import_total--;
found = 1;
+
+ pr_debug("found id:%d payload cnt:%d kernel:%d\n",
+ exp->export_id, exp->payload_count, kernel);
break;
}
}
@@ -385,7 +400,10 @@ int hab_mem_unimport(struct uhab_context *ctx,
exp->import_index,
exp->payload_count,
kernel);
-
+ if (ret) {
+ pr_err("unmap fail id:%d pcnt:%d kernel:%d\n",
+ exp->export_id, exp->payload_count, kernel);
+ }
param->kva = (uint64_t)exp->kva;
kfree(exp);
}
diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c
index f08cc83fe9fc..700239a25652 100644
--- a/drivers/soc/qcom/hab/hab_msg.c
+++ b/drivers/soc/qcom/hab/hab_msg.c
@@ -55,13 +55,12 @@ hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag)
vchan->otherend_closed);
}
- if (!ret && !vchan->otherend_closed) {
+ /* return all the received messages before the remote close */
+ if (!ret && !hab_rx_queue_empty(vchan)) {
spin_lock_bh(&vchan->rx_lock);
- if (!list_empty(&vchan->rx_list)) {
- message = list_first_entry(&vchan->rx_list,
+ message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
- list_del(&message->node);
- }
+ list_del(&message->node);
spin_unlock_bh(&vchan->rx_lock);
}
@@ -91,8 +90,9 @@ static int hab_export_enqueue(struct virtual_channel *vchan,
return 0;
}
-static int hab_send_export_ack(struct physical_channel *pchan,
- struct export_desc *exp)
+static int hab_send_export_ack(struct virtual_channel *vchan,
+ struct physical_channel *pchan,
+ struct export_desc *exp)
{
struct hab_export_ack exp_ack = {
.export_id = exp->export_id,
@@ -104,11 +104,12 @@ static int hab_send_export_ack(struct physical_channel *pchan,
HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
HAB_HEADER_SET_ID(header, exp->vcid_local);
+ HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
return physical_channel_send(pchan, &header, &exp_ack);
}
static int hab_receive_create_export_ack(struct physical_channel *pchan,
- struct uhab_context *ctx)
+ struct uhab_context *ctx, size_t sizebytes)
{
struct hab_export_ack_recvd *ack_recvd =
kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
@@ -116,11 +117,20 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
if (!ack_recvd)
return -ENOMEM;
+ if (sizeof(ack_recvd->ack) != sizebytes)
+ pr_err("exp ack size %lu is not as arrived %zu\n",
+ sizeof(ack_recvd->ack), sizebytes);
+
if (physical_channel_read(pchan,
&ack_recvd->ack,
- sizeof(ack_recvd->ack)) != sizeof(ack_recvd->ack))
+ sizebytes) != sizebytes)
return -EIO;
+ pr_debug("receive export id %d, local vc %X, vd remote %X\n",
+ ack_recvd->ack.export_id,
+ ack_recvd->ack.vcid_local,
+ ack_recvd->ack.vcid_remote);
+
spin_lock_bh(&ctx->expq_lock);
list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
spin_unlock_bh(&ctx->expq_lock);
@@ -137,20 +147,48 @@ void hab_msg_recv(struct physical_channel *pchan,
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
+ uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
struct virtual_channel *vchan = NULL;
struct export_desc *exp_desc;
+ struct timeval tv;
/* get the local virtual channel if it isn't an open message */
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
payload_type != HAB_PAYLOAD_TYPE_ACK) {
- vchan = hab_vchan_get(pchan, vchan_id);
+
+ /* sanity check the received message */
+ if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
+ vchan_id > (HAB_HEADER_ID_MASK >> HAB_HEADER_ID_SHIFT)
+ || !vchan_id || !session_id) {
+ pr_err("Invalid message received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+ }
+
+ vchan = hab_vchan_get(pchan, header);
if (!vchan) {
+ pr_debug("vchan is not found, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+
+ if (sizebytes)
+ pr_err("message is dropped\n");
+
return;
} else if (vchan->otherend_closed) {
hab_vchan_put(vchan);
+ pr_debug("vchan remote is closed, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+
+ if (sizebytes)
+ pr_err("message is dropped\n");
+
return;
}
+ } else {
+ if (sizebytes != sizeof(struct hab_open_send_data)) {
+ pr_err("Invalid open request received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
+ }
}
switch (payload_type) {
@@ -165,9 +203,12 @@ void hab_msg_recv(struct physical_channel *pchan,
case HAB_PAYLOAD_TYPE_INIT:
case HAB_PAYLOAD_TYPE_INIT_ACK:
case HAB_PAYLOAD_TYPE_ACK:
- ret = hab_open_request_add(pchan, header);
- if (ret)
+ ret = hab_open_request_add(pchan, sizebytes, payload_type);
+ if (ret) {
+ pr_err("open request add failed, ret %d, payload type %d, sizebytes %zx\n",
+ ret, payload_type, sizebytes);
break;
+ }
wake_up_interruptible(&dev->openq);
break;
@@ -185,22 +226,49 @@ void hab_msg_recv(struct physical_channel *pchan,
exp_desc->domid_local = pchan->dom_id;
hab_export_enqueue(vchan, exp_desc);
- hab_send_export_ack(pchan, exp_desc);
+ hab_send_export_ack(vchan, pchan, exp_desc);
break;
case HAB_PAYLOAD_TYPE_EXPORT_ACK:
- ret = hab_receive_create_export_ack(pchan, vchan->ctx);
- if (ret)
+ ret = hab_receive_create_export_ack(pchan, vchan->ctx,
+ sizebytes);
+ if (ret) {
+ pr_err("failed to handled export ack %d\n", ret);
break;
-
+ }
wake_up_interruptible(&vchan->ctx->exp_wq);
break;
case HAB_PAYLOAD_TYPE_CLOSE:
+ /* remote request close */
+ pr_debug("remote side request close\n");
+ pr_debug(" vchan id %X, other end %X, session %d\n",
+ vchan->id, vchan->otherend_id, session_id);
hab_vchan_stop(vchan);
break;
+ case HAB_PAYLOAD_TYPE_PROFILE:
+ do_gettimeofday(&tv);
+
+ /* pull down the incoming data */
+ message = hab_msg_alloc(pchan, sizebytes);
+ if (!message) {
+ pr_err("msg alloc failed\n");
+ break;
+ }
+
+ ((uint64_t *)message->data)[2] = tv.tv_sec;
+ ((uint64_t *)message->data)[3] = tv.tv_usec;
+ hab_msg_queue(vchan, message);
+ break;
+
default:
+ pr_err("unknown msg is received\n");
+ pr_err("payload type %d, vchan id %x\n",
+ payload_type, vchan_id);
+ pr_err("sizebytes %zx, session %d\n",
+ sizebytes, session_id);
+
break;
}
if (vchan)
diff --git a/drivers/soc/qcom/hab/hab_open.c b/drivers/soc/qcom/hab/hab_open.c
index 66468aa43afd..35f3281604e2 100644
--- a/drivers/soc/qcom/hab/hab_open.c
+++ b/drivers/soc/qcom/hab/hab_open.c
@@ -42,7 +42,7 @@ int hab_open_request_send(struct hab_open_request *request)
}
int hab_open_request_add(struct physical_channel *pchan,
- struct hab_header *header)
+ size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_device *dev = pchan->habdev;
@@ -53,12 +53,11 @@ int hab_open_request_add(struct physical_channel *pchan,
if (!node)
return -ENOMEM;
- if (physical_channel_read(pchan, &data, HAB_HEADER_GET_SIZE(*header)) !=
- HAB_HEADER_GET_SIZE(*header))
+ if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
return -EIO;
request = &node->request;
- request->type = HAB_HEADER_GET_TYPE(*header);
+ request->type = request_type;
request->pchan = pchan;
request->vchan_id = data.vchan_id;
request->sub_id = data.sub_id;
diff --git a/drivers/soc/qcom/hab/hab_parser.c b/drivers/soc/qcom/hab/hab_parser.c
new file mode 100644
index 000000000000..a38d9bcf26b9
--- /dev/null
+++ b/drivers/soc/qcom/hab/hab_parser.c
@@ -0,0 +1,65 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hab.h"
+
+/*
+ * Set a valid mmid value in tbl to mark the entry as valid. All inputs here
+ * are normalized to 1-based integers.
+ */
+static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
+ int32_t vm_range, int32_t mmid_start,
+ int32_t mmid_range, int32_t be)
+{
+ int ret = 0;
+ int i, j;
+
+ for (i = vm_start; i < vm_start+vm_range; i++) {
+ tbl[i].vmid = i; /* set valid vmid value to make it usable */
+ for (j = mmid_start; j < mmid_start + mmid_range; j++) {
+ /* sanity check */
+ if (tbl[i].mmid[j] != HABCFG_VMID_INVALID) {
+ pr_err("overwrite previous setting, i %d, j %d, be %d\n",
+ i, j, tbl[i].is_listener[j]);
+ }
+ tbl[i].mmid[j] = j;
+ tbl[i].is_listener[j] = be; /* BE IS listen */
+ }
+ }
+
+ return ret;
+}
+
+void dump_settings(struct local_vmid *settings)
+{
+ int i, j;
+
+ pr_debug("self vmid is %d\n", settings->self);
+ for (i = 0; i < HABCFG_VMID_MAX; i++) {
+ pr_debug("remote vmid %d\n",
+ settings->vmid_mmid_list[i].vmid);
+ for (j = 0; j <= HABCFG_MMID_AREA_MAX; j++) {
+ pr_debug("mmid %d, is_be %d\n",
+ settings->vmid_mmid_list[i].mmid[j],
+ settings->vmid_mmid_list[i].is_listener[j]);
+ }
+ }
+}
+
+int fill_default_gvm_settings(struct local_vmid *settings, int vmid_local,
+ int mmid_start, int mmid_end) {
+ settings->self = vmid_local;
+ /* default gvm always talks to host as vm0 */
+ return fill_vmid_mmid_tbl(settings->vmid_mmid_list, 0, 1,
+ mmid_start/100, (mmid_end-mmid_start)/100+1, HABCFG_BE_FALSE);
+}
diff --git a/drivers/soc/qcom/hab/hab_pchan.c b/drivers/soc/qcom/hab/hab_pchan.c
index 1ad727f7d90f..36bc29b7bd0c 100644
--- a/drivers/soc/qcom/hab/hab_pchan.c
+++ b/drivers/soc/qcom/hab/hab_pchan.c
@@ -31,10 +31,13 @@ hab_pchan_alloc(struct hab_device *habdev, int otherend_id)
pchan->closed = 1;
pchan->hyp_data = NULL;
+ INIT_LIST_HEAD(&pchan->vchannels);
+ rwlock_init(&pchan->vchans_lock);
spin_lock_init(&pchan->rxbuf_lock);
mutex_lock(&habdev->pchan_lock);
list_add_tail(&pchan->node, &habdev->pchannels);
+ habdev->pchan_cnt++;
mutex_unlock(&habdev->pchan_lock);
return pchan;
@@ -47,6 +50,7 @@ static void hab_pchan_free(struct kref *ref)
mutex_lock(&pchan->habdev->pchan_lock);
list_del(&pchan->node);
+ pchan->habdev->pchan_cnt--;
mutex_unlock(&pchan->habdev->pchan_lock);
kfree(pchan->hyp_data);
kfree(pchan);
@@ -59,11 +63,14 @@ hab_pchan_find_domid(struct hab_device *dev, int dom_id)
mutex_lock(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node)
- if (pchan->dom_id == dom_id)
+ if (pchan->dom_id == dom_id || dom_id == HABCFG_VMID_DONT_CARE)
break;
- if (pchan->dom_id != dom_id)
+ if (pchan->dom_id != dom_id && dom_id != HABCFG_VMID_DONT_CARE) {
+ pr_err("dom_id mismatch requested %d, existing %d\n",
+ dom_id, pchan->dom_id);
pchan = NULL;
+ }
if (pchan && !kref_get_unless_zero(&pchan->refcount))
pchan = NULL;
diff --git a/drivers/soc/qcom/hab/hab_qvm.c b/drivers/soc/qcom/hab/hab_qvm.c
index a37590f23c61..fec06cbbd0c7 100644
--- a/drivers/soc/qcom/hab/hab_qvm.c
+++ b/drivers/soc/qcom/hab/hab_qvm.c
@@ -21,9 +21,51 @@
#include <linux/of.h>
#include <linux/of_platform.h>
-#define DEFAULT_HAB_SHMEM_IRQ 7
-#define SHMEM_PHYSICAL_ADDR 0x1c050000
+struct shmem_irq_config {
+ unsigned long factory_addr; /* from gvm settings when provided */
+ int irq; /* from gvm settings when provided */
+};
+
+/*
+ * Used when the platform does not provide probe features. The size should
+ * match the hab device side (all mmids).
+ */
+static struct shmem_irq_config pchan_factory_settings[] = {
+ {0x1b000000, 7},
+ {0x1b001000, 8},
+ {0x1b002000, 9},
+ {0x1b003000, 10},
+ {0x1b004000, 11},
+ {0x1b005000, 12},
+ {0x1b006000, 13},
+ {0x1b007000, 14},
+ {0x1b008000, 15},
+ {0x1b009000, 16},
+ {0x1b00a000, 17},
+ {0x1b00b000, 18},
+ {0x1b00c000, 19},
+ {0x1b00d000, 20},
+ {0x1b00e000, 21},
+ {0x1b00f000, 22},
+ {0x1b010000, 23},
+ {0x1b011000, 24},
+ {0x1b012000, 25},
+ {0x1b013000, 26},
+
+};
+
+static struct qvm_plugin_info {
+ struct shmem_irq_config *pchan_settings;
+ int setting_size;
+ int curr;
+ int probe_cnt;
+} qvm_priv_info = {
+ pchan_factory_settings,
+ ARRAY_SIZE(pchan_factory_settings),
+ 0,
+ ARRAY_SIZE(pchan_factory_settings)
+};
static irqreturn_t shm_irq_handler(int irq, void *_pchan)
{
@@ -43,22 +85,22 @@ static irqreturn_t shm_irq_handler(int irq, void *_pchan)
return rc;
}
+/*
+ * This path is used only on the guest side.
+ */
static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
- const char *name, uint32_t pages)
+ unsigned long factory_addr, int irq, const char *name, uint32_t pages)
{
int i;
- dev->guest_factory = ioremap(SHMEM_PHYSICAL_ADDR, PAGE_SIZE);
-
- if (!dev->guest_factory) {
- pr_err("Couldn't map guest_factory\n");
- return 0;
- }
+ pr_debug("name = %s, factory paddr = 0x%lx, irq %d, pages %d\n",
+ name, factory_addr, irq, pages);
+ dev->guest_factory = (struct guest_shm_factory *)factory_addr;
if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) {
- pr_err("shmem factory signature incorrect: %ld != %lu\n",
- GUEST_SHM_SIGNATURE, dev->guest_factory->signature);
- iounmap(dev->guest_factory);
+ pr_err("signature error: %ld != %llu, factory addr %lx\n",
+ GUEST_SHM_SIGNATURE, dev->guest_factory->signature,
+ factory_addr);
return 0;
}
@@ -77,16 +119,22 @@ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
/* See if we successfully created/attached to the region. */
if (dev->guest_factory->status != GSS_OK) {
pr_err("create failed: %d\n", dev->guest_factory->status);
- iounmap(dev->guest_factory);
return 0;
}
- pr_debug("shm creation size %x\n", dev->guest_factory->size);
+ pr_debug("shm creation size %x, paddr=%llx, vector %d, dev %pK\n",
+ dev->guest_factory->size,
+ dev->guest_factory->shmem,
+ dev->guest_intr,
+ dev);
+
+ dev->factory_addr = factory_addr;
+ dev->irq = irq;
return dev->guest_factory->shmem;
}
-static int create_dispatcher(struct physical_channel *pchan, int id)
+static int create_dispatcher(struct physical_channel *pchan)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int ret;
@@ -94,21 +142,45 @@ static int create_dispatcher(struct physical_channel *pchan, int id)
tasklet_init(&dev->task, physical_channel_rx_dispatch,
(unsigned long) pchan);
- ret = request_irq(hab_driver.irq, shm_irq_handler, IRQF_SHARED,
- hab_driver.devp[id].name, pchan);
+ pr_debug("request_irq: irq = %d, pchan name = %s",
+ dev->irq, pchan->name);
+ ret = request_irq(dev->irq, shm_irq_handler, IRQF_SHARED,
+ pchan->name, pchan);
if (ret)
pr_err("request_irq for %s failed: %d\n",
- hab_driver.devp[id].name, ret);
+ pchan->name, ret);
return ret;
}
-static struct physical_channel *habhyp_commdev_alloc(int id)
+void hab_pipe_reset(struct physical_channel *pchan)
{
- struct qvm_channel *dev;
- struct physical_channel *pchan = NULL;
- int ret = 0, channel = 0;
+ struct hab_pipe_endpoint *pipe_ep;
+ struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
+
+ pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE,
+ pchan->is_be ? 0 : 1);
+ if (dev->pipe_ep != pipe_ep)
+ pr_warn("The pipe endpoint must not change\n");
+}
+
+/*
+ * Allocate hypervisor plug-in specific resources for a pchan, then call the
+ * common hab pchan alloc function. The hab driver struct is accessed directly.
+ * commdev: pointer to store the pchan address
+ * is_be: pchan local endpoint role
+ * name: pchan name
+ * vmid_remote: remote VM id for the new pchan
+ * mmid_device: hab_device (mmid) the pchan is added to
+ * return: 0 on success, non-zero on failure
+ */
+int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
+ int vmid_remote, struct hab_device *mmid_device)
+{
+ struct qvm_channel *dev = NULL;
+ struct qvm_plugin_info *qvm_priv = hab_driver.hyp_priv;
+ struct physical_channel **pchan = (struct physical_channel **)commdev;
+ int ret = 0, coid = 0, channel = 0;
char *shmdata;
uint32_t pipe_alloc_size =
hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE);
@@ -119,15 +191,27 @@ static struct physical_channel *habhyp_commdev_alloc(int id)
int total_pages;
struct page **pages;
+ pr_debug("habhyp_commdev_alloc: pipe_alloc_size is %d\n",
+ pipe_alloc_size);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
spin_lock_init(&dev->io_lock);
paddr = get_guest_factory_paddr(dev,
- hab_driver.devp[id].name,
+ qvm_priv->pchan_settings[qvm_priv->curr].factory_addr,
+ qvm_priv->pchan_settings[qvm_priv->curr].irq,
+ name,
pipe_alloc_pages);
+ qvm_priv->curr++;
+ if (qvm_priv->curr > qvm_priv->probe_cnt) {
+ pr_err("factory setting %d overflow probed cnt %d\n",
+ qvm_priv->curr, qvm_priv->probe_cnt);
+ ret = -1;
+ goto err;
+ }
total_pages = dev->guest_factory->size + 1;
pages = kmalloc_array(total_pages, sizeof(struct page *), GFP_KERNEL);
@@ -147,72 +231,138 @@ static struct physical_channel *habhyp_commdev_alloc(int id)
}
shmdata = (char *)dev->guest_ctrl + PAGE_SIZE;
+
+ pr_debug("ctrl page 0x%llx mapped at 0x%pK, idx %d\n",
+ paddr, dev->guest_ctrl, dev->guest_ctrl->idx);
+ pr_debug("data buffer mapped at 0x%pK\n", shmdata);
dev->idx = dev->guest_ctrl->idx;
kfree(pages);
dev->pipe = (struct hab_pipe *) shmdata;
+ pr_debug("\"%s\": pipesize %d, addr 0x%pK, be %d\n", name,
+ pipe_alloc_size, dev->pipe, is_be);
dev->pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE,
- dev->be ? 0 : 1);
-
- pchan = hab_pchan_alloc(&hab_driver.devp[id], dev->be);
- if (!pchan) {
+ is_be ? 0 : 1);
+ /* newly created pchan is added to mmid device list */
+ *pchan = hab_pchan_alloc(mmid_device, vmid_remote);
+ if (!(*pchan)) {
ret = -ENOMEM;
goto err;
}
- pchan->closed = 0;
- pchan->hyp_data = (void *)dev;
+ (*pchan)->closed = 0;
+ (*pchan)->hyp_data = (void *)dev;
+ strlcpy((*pchan)->name, name, MAX_VMID_NAME_SIZE);
+ (*pchan)->is_be = is_be;
dev->channel = channel;
+ dev->coid = coid;
- ret = create_dispatcher(pchan, id);
- if (ret < 0)
+ ret = create_dispatcher(*pchan);
+ if (ret)
goto err;
- return pchan;
+ return ret;
err:
kfree(dev);
- if (pchan)
- hab_pchan_put(pchan);
+ if (*pchan)
+ hab_pchan_put(*pchan);
pr_err("habhyp_commdev_alloc failed: %d\n", ret);
- return ERR_PTR(ret);
+ return ret;
+}
+
+int habhyp_commdev_dealloc(void *commdev)
+{
+ struct physical_channel *pchan = (struct physical_channel *)commdev;
+ struct qvm_channel *dev = pchan->hyp_data;
+
+
+ kfree(dev);
+ hab_pchan_put(pchan);
+ return 0;
}
int hab_hypervisor_register(void)
{
- int ret = 0, i;
+ int ret = 0;
hab_driver.b_server_dom = 0;
- /*
- * Can still attempt to instantiate more channels if one fails.
- * Others can be retried later.
- */
- for (i = 0; i < hab_driver.ndevices; i++) {
- if (IS_ERR(habhyp_commdev_alloc(i)))
- ret = -EAGAIN;
- }
+ pr_info("initializing for %s VM\n", hab_driver.b_server_dom ?
+ "host" : "guest");
+
+ hab_driver.hyp_priv = &qvm_priv_info;
return ret;
}
void hab_hypervisor_unregister(void)
{
+ int status, i;
+
+ for (i = 0; i < hab_driver.ndevices; i++) {
+ struct hab_device *dev = &hab_driver.devp[i];
+ struct physical_channel *pchan;
+
+ list_for_each_entry(pchan, &dev->pchannels, node) {
+ status = habhyp_commdev_dealloc(pchan);
+ if (status) {
+ pr_err("failed to free pchan %pK, i %d, ret %d\n",
+ pchan, i, status);
+ }
+ }
+ }
+
+ qvm_priv_info.probe_cnt = 0;
+ qvm_priv_info.curr = 0;
}
static int hab_shmem_probe(struct platform_device *pdev)
{
- int irq = platform_get_irq(pdev, 0);
+ int irq = 0;
+ struct resource *mem;
+ void *shmem_base = NULL;
+ int ret = 0;
+
+ /* hab in one GVM will not have pchans more than one VM could allowed */
+ if (qvm_priv_info.probe_cnt >= hab_driver.ndevices) {
+ pr_err("no more channel, current %d, maximum %d\n",
+ qvm_priv_info.probe_cnt, hab_driver.ndevices);
+ return -ENODEV;
+ }
- if (irq > 0)
- hab_driver.irq = irq;
- else
- hab_driver.irq = DEFAULT_HAB_SHMEM_IRQ;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ pr_err("no interrupt for the channel %d, error %d\n",
+ qvm_priv_info.probe_cnt, irq);
+ return irq;
+ }
+ qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].irq = irq;
- return 0;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ pr_err("can not get io mem resource for channel %d\n",
+ qvm_priv_info.probe_cnt);
+ return -EINVAL;
+ }
+ shmem_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(shmem_base)) {
+ pr_err("ioremap failed for channel %d, mem %pK\n",
+ qvm_priv_info.probe_cnt, mem);
+ return -EINVAL;
+ }
+ qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].factory_addr
+ = (unsigned long)((uintptr_t)shmem_base);
+
+ pr_debug("pchan idx %d, hab irq=%d shmem_base=%pK, mem %pK\n",
+ qvm_priv_info.probe_cnt, irq, shmem_base, mem);
+
+ qvm_priv_info.probe_cnt++;
+
+ return ret;
}
static int hab_shmem_remove(struct platform_device *pdev)
@@ -220,6 +370,23 @@ static int hab_shmem_remove(struct platform_device *pdev)
return 0;
}
+static void hab_shmem_shutdown(struct platform_device *pdev)
+{
+ int i;
+ struct qvm_channel *dev;
+ struct physical_channel *pchan;
+ struct hab_device hab_dev;
+
+ for (i = 0; i < hab_driver.ndevices; i++) {
+ hab_dev = hab_driver.devp[i];
+ pr_debug("detaching %s\n", hab_dev.name);
+ list_for_each_entry(pchan, &hab_dev.pchannels, node) {
+ dev = (struct qvm_channel *)pchan->hyp_data;
+ dev->guest_ctrl->detach = 0;
+ }
+ }
+}
+
static const struct of_device_id hab_shmem_match_table[] = {
{.compatible = "qvm,guest_shm"},
{},
@@ -228,6 +395,7 @@ static const struct of_device_id hab_shmem_match_table[] = {
static struct platform_driver hab_shmem_driver = {
.probe = hab_shmem_probe,
.remove = hab_shmem_remove,
+ .shutdown = hab_shmem_shutdown,
.driver = {
.name = "hab_shmem",
.of_match_table = of_match_ptr(hab_shmem_match_table),
@@ -236,12 +404,14 @@ static struct platform_driver hab_shmem_driver = {
static int __init hab_shmem_init(void)
{
+ qvm_priv_info.probe_cnt = 0;
return platform_driver_register(&hab_shmem_driver);
}
static void __exit hab_shmem_exit(void)
{
platform_driver_unregister(&hab_shmem_driver);
+ qvm_priv_info.probe_cnt = 0;
}
core_initcall(hab_shmem_init);
diff --git a/drivers/soc/qcom/hab/hab_qvm.h b/drivers/soc/qcom/hab/hab_qvm.h
index e94b82f87942..b483f4c21331 100644
--- a/drivers/soc/qcom/hab/hab_qvm.h
+++ b/drivers/soc/qcom/hab/hab_qvm.h
@@ -30,6 +30,7 @@ struct qvm_channel {
struct tasklet_struct task;
struct guest_shm_factory *guest_factory;
struct guest_shm_control *guest_ctrl;
+ /* cached guest ctrl idx value to prevent trap when accessed */
uint32_t idx;
int channel;
@@ -37,11 +38,15 @@ struct qvm_channel {
unsigned int guest_intr;
unsigned int guest_iid;
+ unsigned int factory_addr;
+ unsigned int irq;
+
};
/* Shared mem size in each direction for communication pipe */
#define PIPE_SHMEM_SIZE (128 * 1024)
void *qnx_hyp_rx_dispatch(void *data);
+void hab_pipe_reset(struct physical_channel *pchan);
#endif /* __HAB_QNX_H */
diff --git a/drivers/soc/qcom/hab/hab_vchan.c b/drivers/soc/qcom/hab/hab_vchan.c
index 75a3fad68ab5..91ae173f7e83 100644
--- a/drivers/soc/qcom/hab/hab_vchan.c
+++ b/drivers/soc/qcom/hab/hab_vchan.c
@@ -40,6 +40,9 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
hab_pchan_get(pchan);
vchan->pchan = pchan;
+ write_lock(&pchan->vchans_lock);
+ list_add_tail(&vchan->pnode, &pchan->vchannels);
+ write_unlock(&pchan->vchans_lock);
vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
HAB_VCID_MMID_MASK) |
@@ -66,19 +69,22 @@ hab_vchan_free(struct kref *ref)
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
- struct export_desc *exp;
+ struct export_desc *exp, *exp_tmp;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
+ struct virtual_channel *vc, *vc_tmp;
+ spin_lock_bh(&vchan->rx_lock);
list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
list_del(&message->node);
hab_msg_free(message);
}
+ spin_unlock_bh(&vchan->rx_lock);
do {
found = 0;
write_lock(&ctx->exp_lock);
- list_for_each_entry(exp, &ctx->exp_whse, node) {
+ list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
if (exp->vcid_local == vchan->id) {
list_del(&exp->node);
found = 1;
@@ -95,7 +101,7 @@ hab_vchan_free(struct kref *ref)
do {
found = 0;
spin_lock_bh(&ctx->imp_lock);
- list_for_each_entry(exp, &ctx->imp_whse, node) {
+ list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if (exp->vcid_remote == vchan->id) {
list_del(&exp->node);
found = 1;
@@ -117,6 +123,15 @@ hab_vchan_free(struct kref *ref)
idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
spin_unlock_bh(&pchan->vid_lock);
+ write_lock(&pchan->vchans_lock);
+ list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
+ if (vchan == vc) {
+ list_del(&vc->pnode);
+ break;
+ }
+ }
+ write_unlock(&pchan->vchans_lock);
+
hab_pchan_put(pchan);
hab_ctx_put(ctx);
@@ -124,14 +139,17 @@ hab_vchan_free(struct kref *ref)
}
struct virtual_channel*
-hab_vchan_get(struct physical_channel *pchan, uint32_t vchan_id)
+hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
+ uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
+ uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
spin_lock_bh(&pchan->vid_lock);
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
if (vchan)
- if (!kref_get_unless_zero(&vchan->refcount))
+ if ((vchan->session_id != session_id) ||
+ (!kref_get_unless_zero(&vchan->refcount)))
vchan = NULL;
spin_unlock_bh(&pchan->vid_lock);
@@ -146,6 +164,17 @@ void hab_vchan_stop(struct virtual_channel *vchan)
}
}
+void hab_vchans_stop(struct physical_channel *pchan)
+{
+ struct virtual_channel *vchan, *tmp;
+
+ read_lock(&pchan->vchans_lock);
+ list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) {
+ hab_vchan_stop(vchan);
+ }
+ read_unlock(&pchan->vchans_lock);
+}
+
void hab_vchan_stop_notify(struct virtual_channel *vchan)
{
hab_send_close_msg(vchan);
diff --git a/drivers/soc/qcom/hab/khab.c b/drivers/soc/qcom/hab/khab.c
index f7499773ae42..05e6aa2fa7ca 100644
--- a/drivers/soc/qcom/hab/khab.c
+++ b/drivers/soc/qcom/hab/khab.c
@@ -117,7 +117,7 @@ int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
param.flags = flags;
ret = hab_mem_import(hab_driver.kctx, &param, 1);
- if (!IS_ERR(ret))
+ if (!ret)
*buff_shared = (void *)(uintptr_t)param.kva;
return ret;
diff --git a/drivers/soc/qcom/hab/qvm_comm.c b/drivers/soc/qcom/hab/qvm_comm.c
index 20a631e13794..41e34be9ac21 100644
--- a/drivers/soc/qcom/hab/qvm_comm.c
+++ b/drivers/soc/qcom/hab/qvm_comm.c
@@ -21,6 +21,7 @@ static inline void habhyp_notify(void *commdev)
dev->guest_ctrl->notify = ~0;
}
+/* this is only used to read payload, never the head! */
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
@@ -33,6 +34,8 @@ int physical_channel_read(struct physical_channel *pchan,
return 0;
}
+#define HAB_HEAD_SIGNATURE 0xBEE1BEE1
+
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
@@ -40,6 +43,7 @@ int physical_channel_send(struct physical_channel *pchan,
int sizebytes = HAB_HEADER_GET_SIZE(*header);
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int total_size = sizeof(*header) + sizebytes;
+ struct timeval tv;
if (total_size > dev->pipe_ep->tx_info.sh_buf->size)
return -EINVAL; /* too much data for ring */
@@ -53,6 +57,8 @@ int physical_channel_send(struct physical_channel *pchan,
return -EAGAIN; /* not enough free space */
}
+ header->signature = HAB_HEAD_SIGNATURE;
+
if (hab_pipe_write(dev->pipe_ep,
(unsigned char *)header,
sizeof(*header)) != sizeof(*header)) {
@@ -60,6 +66,12 @@ int physical_channel_send(struct physical_channel *pchan,
return -EIO;
}
+ if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
+ do_gettimeofday(&tv);
+ ((uint64_t *)payload)[0] = tv.tv_sec;
+ ((uint64_t *)payload)[1] = tv.tv_usec;
+ }
+
if (sizebytes) {
if (hab_pipe_write(dev->pipe_ep,
(unsigned char *)payload,
@@ -89,6 +101,14 @@ void physical_channel_rx_dispatch(unsigned long data)
sizeof(header)) != sizeof(header))
break; /* no data available */
+ if (header.signature != HAB_HEAD_SIGNATURE) {
+ pr_err("HAB signature mismatch, expect %X, received %X, id_type_size %X, session %X, sequence %X\n",
+ HAB_HEAD_SIGNATURE, header.signature,
+ header.id_type_size,
+ header.session_id,
+ header.sequence);
+ }
+
hab_msg_recv(pchan, &header);
}
spin_unlock_bh(&pchan->rxbuf_lock);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index e7f83ed1307b..7f71824d9548 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -297,6 +297,7 @@ enum icnss_driver_state {
ICNSS_SHUTDOWN_DONE,
ICNSS_HOST_TRIGGERED_PDR,
ICNSS_FW_DOWN,
+ ICNSS_DRIVER_UNLOADING,
};
struct ce_irq_list {
@@ -562,6 +563,12 @@ static int icnss_assign_msa_perm_all(struct icnss_priv *priv,
int i;
enum icnss_msa_perm old_perm;
+ if (priv->nr_mem_region > QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01) {
+ icnss_pr_err("Invalid memory region len %d\n",
+ priv->nr_mem_region);
+ return -EINVAL;
+ }
+
for (i = 0; i < priv->nr_mem_region; i++) {
old_perm = priv->mem_region[i].perm;
ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm);
@@ -1167,6 +1174,16 @@ bool icnss_is_fw_ready(void)
}
EXPORT_SYMBOL(icnss_is_fw_ready);
+bool icnss_is_fw_down(void)
+{
+ if (!penv)
+ return false;
+ else
+ return test_bit(ICNSS_FW_DOWN, &penv->state);
+}
+EXPORT_SYMBOL(icnss_is_fw_down);
+
+
int icnss_power_off(struct device *dev)
{
struct icnss_priv *priv = dev_get_drvdata(dev);
@@ -2174,6 +2191,12 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv)
if (!priv->ops || !priv->ops->reinit)
goto out;
+ if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
+ icnss_pr_err("FW is in bad state, state: 0x%lx\n",
+ priv->state);
+ goto out;
+ }
+
if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
goto call_probe;
@@ -2291,9 +2314,11 @@ static int icnss_driver_event_unregister_driver(void *data)
goto out;
}
+ set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
if (penv->ops)
penv->ops->remove(&penv->pdev->dev);
+ clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
clear_bit(ICNSS_DRIVER_PROBED, &penv->state);
penv->ops = NULL;
@@ -2316,8 +2341,10 @@ static int icnss_call_driver_remove(struct icnss_priv *priv)
if (!priv->ops || !priv->ops->remove)
return 0;
+ set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
penv->ops->remove(&priv->pdev->dev);
+ clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
icnss_hw_power_off(penv);
@@ -2523,7 +2550,8 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
icnss_ignore_qmi_timeout(true);
fw_down_data.crashed = !!notif->crashed;
- if (test_bit(ICNSS_FW_READY, &priv->state))
+ if (test_bit(ICNSS_FW_READY, &priv->state) &&
+ !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
icnss_call_driver_uevent(priv,
ICNSS_UEVENT_FW_DOWN,
&fw_down_data);
@@ -2667,7 +2695,8 @@ event_post:
icnss_ignore_qmi_timeout(true);
fw_down_data.crashed = event_data->crashed;
- if (test_bit(ICNSS_FW_READY, &priv->state))
+ if (test_bit(ICNSS_FW_READY, &priv->state) &&
+ !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
icnss_call_driver_uevent(priv,
ICNSS_UEVENT_FW_DOWN,
&fw_down_data);
@@ -3885,6 +3914,8 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_FW_DOWN:
seq_puts(s, "FW DOWN");
continue;
+ case ICNSS_DRIVER_UNLOADING:
+ seq_puts(s, "DRIVER UNLOADING");
}
seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 2a2d213f8ca0..ecc633749204 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -572,8 +572,10 @@ static void glink_pkt_notify_state_worker(struct work_struct *work)
mutex_lock(&devp->ch_lock);
devp->ch_state = event;
if (event == GLINK_CONNECTED) {
- if (!devp->handle)
- devp->handle = handle;
+ if (!devp->handle) {
+ GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+ goto exit;
+ }
devp->in_reset = 0;
wake_up_interruptible(&devp->ch_opened_wait_queue);
} else if (event == GLINK_REMOTE_DISCONNECTED) {
@@ -585,6 +587,7 @@ static void glink_pkt_notify_state_worker(struct work_struct *work)
devp->handle = NULL;
wake_up_interruptible(&devp->ch_closed_wait_queue);
}
+exit:
mutex_unlock(&devp->ch_lock);
kfree(work_item);
}
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index d82c36480159..6e153500f639 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -722,6 +722,12 @@ static void pil_clear_segment(struct pil_desc *desc)
/* Clear memory so that unauthorized ELF code is not left behind */
buf = desc->map_fw_mem(priv->region_start, (priv->region_end -
priv->region_start), map_data);
+
+ if (!buf) {
+ pil_err(desc, "Failed to map memory\n");
+ return;
+ }
+
pil_memset_io(buf, 0, (priv->region_end - priv->region_start));
desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
map_data);
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 7ede3e29dcf9..cc69e6d68f16 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -601,6 +601,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
}
drv->dp_size = dp_fw->size;
drv->mba_dp_size += drv->dp_size;
+ drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
}
mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index 3791169ec0ac..b1afd02b49bf 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -679,9 +679,10 @@ void apr_cb_func(void *buf, int len, void *priv)
}
temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
- pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
- if (c_svc->port_cnt && c_svc->port_fn[temp_port])
- c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]);
+ if (((temp_port >= 0) && (temp_port < APR_MAX_PORTS))
+ && (c_svc->port_cnt && c_svc->port_fn[temp_port]))
+ c_svc->port_fn[temp_port](&data,
+ c_svc->port_priv[temp_port]);
else if (c_svc->fn)
c_svc->fn(&data, c_svc->priv);
else
diff --git a/drivers/soc/qcom/qdsp6v2/apr_vm.c b/drivers/soc/qcom/qdsp6v2/apr_vm.c
index d0ea7b22717a..56592ac91e1b 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_vm.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_vm.c
@@ -514,7 +514,8 @@ static int apr_vm_cb_process_evt(char *buf, int len)
temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
- if (c_svc->port_cnt && c_svc->port_fn[temp_port])
+ if (((temp_port >= 0) && (temp_port < APR_MAX_PORTS))
+ && (c_svc->port_cnt && c_svc->port_fn[temp_port]))
c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]);
else if (c_svc->fn)
c_svc->fn(&data, c_svc->priv);
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
index afc40461e8e8..7ef16ad5575b 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
@@ -137,7 +137,7 @@ static int msm_audio_ion_smmu_map(struct ion_client *client,
mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
if (cmd_rsp_size != sizeof(cmd_rsp)) {
- pr_err("%s: invalid size for cmd rsp %lu, expected %lu\n",
+ pr_err("%s: invalid size for cmd rsp %u, expected %zu\n",
__func__, cmd_rsp_size, sizeof(cmd_rsp));
rc = -EIO;
goto err;
@@ -218,7 +218,7 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client,
}
if (cmd_rsp_size != sizeof(cmd_rsp)) {
- pr_err("%s: invalid size for cmd rsp %lu\n",
+ pr_err("%s: invalid size for cmd rsp %u\n",
__func__, cmd_rsp_size);
rc = -EIO;
goto err;
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
index 6ef90b23aed5..2b66d6d5434d 100644
--- a/drivers/soc/qcom/rpm-smd-debug.c
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -90,23 +90,23 @@ static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
cmp += pos;
if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) {
pr_err("Invalid number of arguments passed\n");
- goto err;
+ goto err_request;
}
if (strlen(key_str) > 4) {
pr_err("Key value cannot be more than 4 charecters");
- goto err;
+ goto err_request;
}
key = string_to_uint(key_str);
if (!key) {
pr_err("Key values entered incorrectly\n");
- goto err;
+ goto err_request;
}
cmp += pos;
if (sscanf(cmp, "%u %n", &data, &pos) != 1) {
pr_err("Invalid number of arguments passed\n");
- goto err;
+ goto err_request;
}
if (msm_rpm_add_kvp_data(req, key,
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1f4a1f02a2cd..fec1ef2b1748 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -315,6 +315,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
+ depends on HAS_DMA
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d22de4c8c399..3de39bd794b6 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -863,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
- if (l & 1)
+ if (l & 3)
break;
copy32 = copy_wswap32;
} else {
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 73396072a052..5c56001e36db 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -54,6 +54,7 @@ static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
static int get_local_resources(struct msm_spi *dd);
static void put_local_resources(struct msm_spi *dd);
static void msm_spi_slv_setup(struct msm_spi *dd);
+static inline int msm_spi_wait_valid(struct msm_spi *dd);
static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
struct platform_device *pdev)
@@ -84,18 +85,22 @@ static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
return 0;
}
-static inline void msm_spi_register_init(struct msm_spi *dd)
+static inline int msm_spi_register_init(struct msm_spi *dd)
{
- if (dd->pdata->is_slv_ctrl)
+ if (dd->pdata->is_slv_ctrl) {
writel_relaxed(0x00000002, dd->base + SPI_SW_RESET);
- else
+ if (msm_spi_wait_valid(dd))
+ return -EIO;
+ } else {
writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
+ }
msm_spi_set_state(dd, SPI_OP_STATE_RESET);
writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
if (dd->qup_ver)
writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
+ return 0;
}
static int msm_spi_pinctrl_init(struct msm_spi *dd)
@@ -1561,10 +1566,11 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
pm_runtime_put_autosuspend(dd->dev);
}
-static void reset_core(struct msm_spi *dd)
+static int reset_core(struct msm_spi *dd)
{
u32 spi_ioc;
- msm_spi_register_init(dd);
+ if (msm_spi_register_init(dd))
+ return -EIO;
/*
* The SPI core generates a bogus input overrun error on some targets,
* when a transition from run to reset state occurs and if the FIFO has
@@ -1581,6 +1587,7 @@ static void reset_core(struct msm_spi *dd)
*/
mb();
msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+ return 0;
}
static void put_local_resources(struct msm_spi *dd)
@@ -1694,7 +1701,11 @@ static int msm_spi_transfer_one(struct spi_master *master,
return -EINVAL;
}
- reset_core(dd);
+ if (reset_core(dd)) {
+ mutex_unlock(&dd->core_lock);
+ spi_finalize_current_message(master);
+ return -EIO;
+ }
if (dd->use_dma) {
msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
&dd->bam.prod.config);
@@ -2450,7 +2461,8 @@ static int init_resources(struct platform_device *pdev)
}
}
- msm_spi_register_init(dd);
+ if (msm_spi_register_init(dd))
+ goto err_spi_state;
/*
* The SPI core generates a bogus input overrun error on some targets,
* when a transition from run to reset state occurs and if the FIFO has
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 067cd58375a4..f5a81fc48ffb 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1561,6 +1561,11 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
}
buffer = dmabuf->priv;
+ if (get_secure_vmid(buffer->flags) > 0) {
+ pr_err("%s: cannot sync a secure dmabuf\n", __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
buffer->sg_table->nents, DMA_BIDIRECTIONAL);
dma_buf_put(dmabuf);
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index c2ef091d72ce..a1dd5ccc8109 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -352,7 +352,7 @@ int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
if (!ION_IS_CACHED(flags))
return 0;
- if (flags & ION_FLAG_SECURE)
+ if (get_secure_vmid(flags) > 0)
return 0;
table = ion_sg_table(client, handle);
@@ -738,11 +738,11 @@ long msm_ion_custom_ioctl(struct ion_client *client,
down_read(&mm->mmap_sem);
- start = (unsigned long) data.flush_data.vaddr;
- end = (unsigned long) data.flush_data.vaddr
- + data.flush_data.length;
+ start = (unsigned long)data.flush_data.vaddr +
+ data.flush_data.offset;
+ end = start + data.flush_data.length;
- if (start && check_vaddr_bounds(start, end)) {
+ if (check_vaddr_bounds(start, end)) {
pr_err("%s: virtual address %pK is out of bounds\n",
__func__, data.flush_data.vaddr);
ret = -EINVAL;
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index e8d0ff2d5c9b..808d6ebf6c94 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -272,7 +272,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
error_ret:
mutex_unlock(&chip->state_lock);
- return 0;
+ return ret;
}
static int ad7150_read_event_value(struct iio_dev *indio_dev,
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 035dd456d7d6..737747354db6 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -259,7 +259,7 @@ out_free_irq:
out1:
iio_trigger_unregister(st->trig);
out:
- iio_trigger_put(st->trig);
+ iio_trigger_free(st->trig);
return ret;
}
@@ -272,7 +272,7 @@ static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev)
peripheral_free(st->t->pin);
free_irq(st->irq, st);
iio_trigger_unregister(st->trig);
- iio_trigger_put(st->trig);
+ iio_trigger_free(st->trig);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 80f8ec529424..8ed4558238fc 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -1063,23 +1063,21 @@ struct hsm_action_item {
* \retval buffer
*/
static inline char *hai_dump_data_field(struct hsm_action_item *hai,
- char *buffer, int len)
+ char *buffer, size_t len)
{
- int i, sz, data_len;
+ int i, data_len;
char *ptr;
ptr = buffer;
- sz = len;
data_len = hai->hai_len - sizeof(*hai);
- for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
- int cnt;
-
- cnt = snprintf(ptr, sz, "%.2X",
- (unsigned char)hai->hai_data[i]);
- ptr += cnt;
- sz -= cnt;
+ for (i = 0; (i < data_len) && (len > 2); i++) {
+ snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
+ ptr += 2;
+ len -= 2;
}
+
*ptr = '\0';
+
return buffer;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 7f8c70056ffd..040553d6e316 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -550,6 +550,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
if (lock == NULL)
return NULL;
+ if (lock->l_export && lock->l_export->exp_failed) {
+ CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
+ lock, lock->l_export);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
+
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 7df978371c9a..44fffbd1bc74 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -402,15 +402,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = VM_FAULT_LOCKED;
break;
case -ENODATA:
+ case -EAGAIN:
case -EFAULT:
result = VM_FAULT_NOPAGE;
break;
case -ENOMEM:
result = VM_FAULT_OOM;
break;
- case -EAGAIN:
- result = VM_FAULT_RETRY;
- break;
default:
result = VM_FAULT_SIGBUS;
break;
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 3da4c01e2159..adeefb31cbad 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -376,6 +376,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
if (!lli->lli_has_smd)
return -EBADF;
+ /* Check EOF by ourselves */
+ if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
+ return 0;
+
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
return -EINVAL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index f45898f17793..6d3c25ccb297 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1240,20 +1240,15 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
* it may hit swab race at LU-1044. */
if (req->rq_ops->hpreq_check) {
rc = req->rq_ops->hpreq_check(req);
- /**
- * XXX: Out of all current
- * ptlrpc_hpreq_ops::hpreq_check(), only
- * ldlm_cancel_hpreq_check() can return an error code;
- * other functions assert in similar places, which seems
- * odd. What also does not seem right is that handlers
- * for those RPCs do not assert on the same checks, but
- * rather handle the error cases. e.g. see
- * ost_rw_hpreq_check(), and ost_brw_read(),
- * ost_brw_write().
+ if (rc == -ESTALE) {
+ req->rq_status = rc;
+ ptlrpc_error(req);
+ }
+ /** can only return error,
+ * 0 for normal request,
+ * or 1 for high priority request
*/
- if (rc < 0)
- return rc;
- LASSERT(rc == 0 || rc == 1);
+ LASSERT(rc <= 1);
}
spin_lock_bh(&req->rq_export->exp_rpc_lock);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 70b8f4fabfad..e658e11e1829 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -1431,17 +1431,25 @@ static ssize_t lcd_write(struct file *file,
static int lcd_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = -EBUSY;
if (!atomic_dec_and_test(&lcd_available))
- return -EBUSY; /* open only once at a time */
+ goto fail; /* open only once at a time */
+ ret = -EPERM;
if (file->f_mode & FMODE_READ) /* device is write-only */
- return -EPERM;
+ goto fail;
if (lcd.must_clear) {
lcd_clear_display();
lcd.must_clear = false;
}
return nonseekable_open(inode, file);
+
+ fail:
+ atomic_inc(&lcd_available);
+ return ret;
}
static int lcd_release(struct inode *inode, struct file *file)
@@ -1704,14 +1712,21 @@ static ssize_t keypad_read(struct file *file,
static int keypad_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = -EBUSY;
if (!atomic_dec_and_test(&keypad_available))
- return -EBUSY; /* open only once at a time */
+ goto fail; /* open only once at a time */
+ ret = -EPERM;
if (file->f_mode & FMODE_WRITE) /* device is read-only */
- return -EPERM;
+ goto fail;
keypad_buflen = 0; /* flush the buffer on opening */
return 0;
+ fail:
+ atomic_inc(&keypad_available);
+ return ret;
}
static int keypad_release(struct inode *inode, struct file *file)
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 971bf457f32d..e75a386344e4 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -75,7 +75,7 @@ extern u32 GlobalDebugLevel;
#define DBG_88E_LEVEL(_level, fmt, arg...) \
do { \
if (_level <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX"ERROR " fmt, ##arg); \
+ pr_info(DRIVER_PREFIX fmt, ##arg); \
} while (0)
#define DBG_88E(...) \
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index d374824c4f33..7b16c05b5e8b 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -143,52 +143,52 @@ struct ieee_ibss_seq {
};
struct ieee80211_hdr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
-} __packed;
+} __packed __aligned(2);
struct ieee80211_hdr_3addr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __packed;
+ __le16 seq_ctl;
+} __packed __aligned(2);
struct ieee80211_hdr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
- u16 qc;
-} __packed;
+ __le16 qc;
+} __packed __aligned(2);
struct ieee80211_hdr_3addr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u16 qc;
+ __le16 seq_ctl;
+ __le16 qc;
} __packed;
struct eapol {
u8 snap[6];
- u16 ethertype;
+ __be16 ethertype;
u8 version;
u8 type;
- u16 length;
+ __le16 length;
} __packed;
@@ -528,13 +528,13 @@ struct ieee80211_security {
*/
struct ieee80211_header_data {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[6];
u8 addr2[6];
u8 addr3[6];
- u16 seq_ctrl;
-};
+ __le16 seq_ctrl;
+} __packed __aligned(2);
#define BEACON_PROBE_SSID_ID_POSITION 12
@@ -566,18 +566,18 @@ struct ieee80211_info_element {
/*
* These are the data types that can make up management packets
*
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
+ __le16 auth_algorithm;
+ __le16 auth_sequence;
+ __le16 beacon_interval;
+ __le16 capability;
u8 current_ap[ETH_ALEN];
- u16 listen_interval;
+ __le16 listen_interval;
struct {
u16 association_id:14, reserved:2;
} __packed;
- u32 time_stamp[2];
- u16 reason;
- u16 status;
+ __le32 time_stamp[2];
+ __le16 reason;
+ __le16 status;
*/
#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
@@ -585,16 +585,16 @@ struct ieee80211_info_element {
struct ieee80211_authentication {
struct ieee80211_header_data header;
- u16 algorithm;
- u16 transaction;
- u16 status;
+ __le16 algorithm;
+ __le16 transaction;
+ __le16 status;
} __packed;
struct ieee80211_probe_response {
struct ieee80211_header_data header;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 capability;
+ __le32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
struct ieee80211_info_element info_element;
} __packed;
@@ -604,16 +604,16 @@ struct ieee80211_probe_request {
struct ieee80211_assoc_request_frame {
struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 listen_interval;
+ __le16 capability;
+ __le16 listen_interval;
struct ieee80211_info_element_hdr info_element;
} __packed;
struct ieee80211_assoc_response_frame {
struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 status;
- u16 aid;
+ __le16 capability;
+ __le16 status;
+ __le16 aid;
} __packed;
struct ieee80211_txb {
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index edfc6805e012..2b348439242f 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -199,7 +199,7 @@ static inline char *translate_scan(struct _adapter *padapter,
iwe.cmd = SIOCGIWMODE;
memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs),
2);
- cap = le16_to_cpu(cap);
+ le16_to_cpus(&cap);
if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) {
if (cap & WLAN_CAPABILITY_BSS)
iwe.u.mode = (u32)IW_MODE_MASTER;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 68d65d230fe3..d3ad89c7b8af 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -339,7 +339,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
/* if in MP_STATE, update pkt_attrib from mp_txcmd, and overwrite
* some settings above.*/
if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
- pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
+ pattrib->priority =
+ (le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f;
return _SUCCESS;
}
@@ -479,7 +480,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- u16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_ctl;
memset(hdr, 0, WLANHDR_OFFSET);
SetFrameSubType(fctrl, pattrib->subtype);
@@ -568,7 +569,7 @@ static sint r8712_put_snap(u8 *data, u16 h_proto)
snap->oui[0] = oui[0];
snap->oui[1] = oui[1];
snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
return SNAP_SIZE + sizeof(u16);
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 1ff1c83e2df5..bb73401f5761 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1759,7 +1759,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
bool sess_ref = false;
- u8 function;
+ u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
@@ -1805,10 +1805,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-
- u8 tcm_function;
- int ret;
-
transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
@@ -1844,15 +1840,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
-
- ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
- tcm_function, GFP_KERNEL);
- if (ret < 0)
- return iscsit_add_reject_cmd(cmd,
+ }
+ ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
+ GFP_KERNEL);
+ if (ret < 0)
+ return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
- cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
- }
+ cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
@@ -1928,12 +1923,14 @@ attach:
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
+ }
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f69f4902dc07..ee16a45f1607 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -350,7 +350,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
}
- list_del(&acl->acl_list);
+ list_del_init(&acl->acl_list);
tpg->num_node_acls--;
mutex_unlock(&tpg->acl_node_mutex);
@@ -572,7 +572,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
* in transport_deregister_session().
*/
list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
se_tpg->num_node_acls--;
core_tpg_wait_for_nacl_pr_ref(nacl);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f71bedea973a..37abf881ca75 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -431,7 +431,7 @@ static void target_complete_nacl(struct kref *kref)
}
mutex_lock(&se_tpg->acl_node_mutex);
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
mutex_unlock(&se_tpg->acl_node_mutex);
core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -503,7 +503,7 @@ void transport_free_session(struct se_session *se_sess)
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
if (se_nacl->dynamic_stop)
- list_del(&se_nacl->acl_list);
+ list_del_init(&se_nacl->acl_list);
}
mutex_unlock(&se_tpg->acl_node_mutex);
@@ -1970,6 +1970,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
+ cmd->transport_state |= CMD_T_SENT;
+
__target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2007,6 +2009,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
+ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
restart:
target_restart_delayed_cmds(dev);
}
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
new file mode 100644
index 000000000000..2330a4eb4e8b
--- /dev/null
+++ b/drivers/tee/Kconfig
@@ -0,0 +1,18 @@
+# Generic Trusted Execution Environment Configuration
+config TEE
+ tristate "Trusted Execution Environment support"
+ select DMA_SHARED_BUFFER
+ select GENERIC_ALLOCATOR
+ help
+ This implements a generic interface towards a Trusted Execution
+ Environment (TEE).
+
+if TEE
+
+menu "TEE drivers"
+
+source "drivers/tee/optee/Kconfig"
+
+endmenu
+
+endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
new file mode 100644
index 000000000000..7a4e4a1ac39c
--- /dev/null
+++ b/drivers/tee/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_TEE) += tee.o
+tee-objs += tee_core.o
+tee-objs += tee_shm.o
+tee-objs += tee_shm_pool.o
+obj-$(CONFIG_OPTEE) += optee/
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
new file mode 100644
index 000000000000..0126de898036
--- /dev/null
+++ b/drivers/tee/optee/Kconfig
@@ -0,0 +1,7 @@
+# OP-TEE Trusted Execution Environment Configuration
+config OPTEE
+ tristate "OP-TEE"
+ depends on HAVE_ARM_SMCCC
+ help
+ This implements the OP-TEE Trusted Execution Environment (TEE)
+ driver.
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
new file mode 100644
index 000000000000..92fe5789bcce
--- /dev/null
+++ b/drivers/tee/optee/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_OPTEE) += optee.o
+optee-objs += core.o
+optee-objs += call.o
+optee-objs += rpc.o
+optee-objs += supp.o
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
new file mode 100644
index 000000000000..f7b7b404c990
--- /dev/null
+++ b/drivers/tee/optee/call.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+struct optee_call_waiter {
+ struct list_head list_node;
+ struct completion c;
+};
+
+static void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're preparing to make a call to secure world. In case we can't
+ * allocate a thread in secure world we'll end up waiting in
+ * optee_cq_wait_for_completion().
+ *
+ * Normally if there's no contention in secure world the call will
+ * complete and we can cleanup directly with optee_cq_wait_final().
+ */
+ mutex_lock(&cq->mutex);
+
+ /*
+ * We add ourselves to the queue, but we don't wait. This
+ * guarantees that we don't lose a completion if secure world
+ * returns busy and another thread just exited and try to complete
+ * someone.
+ */
+ init_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ wait_for_completion(&w->c);
+
+ mutex_lock(&cq->mutex);
+
+ /* Move to end of list to get out of the way for other waiters */
+ list_del(&w->list_node);
+ reinit_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_complete_one(struct optee_call_queue *cq)
+{
+ struct optee_call_waiter *w;
+
+ list_for_each_entry(w, &cq->waiters, list_node) {
+ if (!completion_done(&w->c)) {
+ complete(&w->c);
+ break;
+ }
+ }
+}
+
+static void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're done with the call to secure world. The thread in secure
+ * world that was used for this call is now available for some
+ * other task to use.
+ */
+ mutex_lock(&cq->mutex);
+
+ /* Get out of the list */
+ list_del(&w->list_node);
+
+ /* Wake up one eventual waiting task */
+ optee_cq_complete_one(cq);
+
+ /*
+ * If we're completed we've got a completion from another task that
+ * was just done with its call to secure world. Since yet another
+ * thread now is available in secure world wake up another eventual
+ * waiting task.
+ */
+ if (completion_done(&w->c))
+ optee_cq_complete_one(cq);
+
+ mutex_unlock(&cq->mutex);
+}
+
+/* Requires the filpstate mutex to be held */
+static struct optee_session *find_session(struct optee_context_data *ctxdata,
+ u32 session_id)
+{
+ struct optee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->session_id == session_id)
+ return sess;
+
+ return NULL;
+}
+
+/**
+ * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx: calling context
+ * @parg: physical address of message to pass to secure world
+ *
+ * Does an SMC to OP-TEE in secure world and handles eventual resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from secure world, 0 is OK
+ */
+u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_call_waiter w;
+ struct optee_rpc_param param = { };
+ u32 ret;
+
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ param.a4, param.a5, param.a6, param.a7,
+ &res);
+
+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ /*
+ * Out of threads in secure world, wait for a thread
+ * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+ param.a0 = res.a0;
+ param.a1 = res.a1;
+ param.a2 = res.a2;
+ param.a3 = res.a3;
+ optee_handle_rpc(ctx, &param);
+ } else {
+ ret = res.a0;
+ break;
+ }
+ }
+
+ /*
+ * We're done with our thread in secure world, if there's any
+ * thread waiters wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return ret;
+}
+
+static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
+ struct optee_msg_arg **msg_arg,
+ phys_addr_t *msg_parg)
+{
+ int rc;
+ struct tee_shm *shm;
+ struct optee_msg_arg *ma;
+
+ shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
+ TEE_SHM_MAPPED);
+ if (IS_ERR(shm))
+ return shm;
+
+ ma = tee_shm_get_va(shm, 0);
+ if (IS_ERR(ma)) {
+ rc = PTR_ERR(ma);
+ goto out;
+ }
+
+ rc = tee_shm_get_pa(shm, 0, msg_parg);
+ if (rc)
+ goto out;
+
+ memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
+ ma->num_params = num_params;
+ *msg_arg = ma;
+out:
+ if (rc) {
+ tee_shm_free(shm);
+ return ERR_PTR(rc);
+ }
+
+ return shm;
+}
+
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ int rc;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess = NULL;
+
+ /* +2 for the meta parameters added below */
+ shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ /*
+ * Initialize and add the meta parameters needed when opening a
+ * session.
+ */
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
+ memcpy(&msg_arg->params[1].u.value, arg->clnt_uuid, sizeof(arg->clnt_uuid));
+ msg_arg->params[1].u.value.c = arg->clnt_login;
+
+ rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
+ if (rc)
+ goto out;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (optee_do_call_with_arg(ctx, msg_parg)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (msg_arg->ret == TEEC_SUCCESS) {
+ /* A new session has been created, add it to the list. */
+ sess->session_id = msg_arg->session;
+ mutex_lock(&ctxdata->mutex);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ mutex_unlock(&ctxdata->mutex);
+ } else {
+ kfree(sess);
+ }
+
+ if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ /* Close session again to avoid leakage */
+ optee_close_session(ctx, msg_arg->session);
+ } else {
+ arg->session = msg_arg->session;
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+ }
+out:
+ tee_shm_free(shm);
+
+ return rc;
+}
+
+int optee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess;
+
+ /* Check that the session is valid and remove it from the list */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ if (sess)
+ list_del(&sess->list_node);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+ kfree(sess);
+
+ shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ msg_arg->session = session;
+ optee_do_call_with_arg(ctx, msg_parg);
+
+ tee_shm_free(shm);
+ return 0;
+}
+
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess;
+ int rc;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, arg->session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+ msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
+ msg_arg->func = arg->func;
+ msg_arg->session = arg->session;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
+ if (rc)
+ goto out;
+
+ if (optee_do_call_with_arg(ctx, msg_parg)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+out:
+ tee_shm_free(shm);
+ return rc;
+}
+
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
+ msg_arg->session = session;
+ msg_arg->cancel_id = cancel_id;
+ optee_do_call_with_arg(ctx, msg_parg);
+
+ tee_shm_free(shm);
+ return 0;
+}
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocation
+ * in OP-TEE
+ * @optee: main service struct
+ */
+void optee_enable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
+ 0, &res);
+ if (res.a0 == OPTEE_SMC_RETURN_OK)
+ break;
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of some shared memory allocation
+ * in OP-TEE
+ * @optee: main service struct
+ */
+void optee_disable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_disable_shm_cache_result result;
+ } res;
+
+ optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
+ 0, &res.smccc);
+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+ break; /* All shm's freed */
+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
+ struct tee_shm *shm;
+
+ shm = reg_pair_to_ptr(res.result.shm_upper32,
+ res.result.shm_lower32);
+ tee_shm_free(shm);
+ } else {
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
new file mode 100644
index 000000000000..58169e519422
--- /dev/null
+++ b/drivers/tee/optee/core.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+#define DRIVER_NAME "optee"
+
+#define OPTEE_SHM_NUM_PRIV_PAGES 1
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+int optee_from_msg_param(struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ int rc;
+ size_t n;
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ p->u.value.a = mp->u.value.a;
+ p->u.value.b = mp->u.value.b;
+ p->u.value.c = mp->u.value.c;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ p->u.memref.size = mp->u.tmem.size;
+ shm = (struct tee_shm *)(unsigned long)
+ mp->u.tmem.shm_ref;
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ break;
+ }
+ rc = tee_shm_get_pa(shm, 0, &pa);
+ if (rc)
+ return rc;
+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ p->u.memref.shm = shm;
+
+ /* Check that the memref is covered by the shm object */
+ if (p->u.memref.size) {
+ size_t o = p->u.memref.shm_offs +
+ p->u.memref.size - 1;
+
+ rc = tee_shm_get_pa(shm, o, NULL);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
+ const struct tee_param *params)
+{
+ int rc;
+ size_t n;
+ phys_addr_t pa;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ mp->u.value.a = p->u.value.a;
+ mp->u.value.b = p->u.value.b;
+ mp->u.value.c = p->u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
+ p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ break;
+ }
+ rc = tee_shm_get_pa(p->u.memref.shm,
+ p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void optee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int optee_open(struct tee_context *ctx)
+{
+ struct optee_context_data *ctxdata;
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ if (teedev == optee->supp_teedev) {
+ bool busy = true;
+
+ mutex_lock(&optee->supp.ctx_mutex);
+ if (!optee->supp.ctx) {
+ busy = false;
+ optee->supp.ctx = ctx;
+ }
+ mutex_unlock(&optee->supp.ctx_mutex);
+ if (busy) {
+ kfree(ctxdata);
+ return -EBUSY;
+ }
+ }
+
+ mutex_init(&ctxdata->mutex);
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void optee_release(struct tee_context *ctx)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct tee_shm *shm;
+ struct optee_msg_arg *arg = NULL;
+ phys_addr_t parg;
+ struct optee_session *sess;
+ struct optee_session *sess_tmp;
+
+ if (!ctxdata)
+ return;
+
+ shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
+ if (!IS_ERR(shm)) {
+ arg = tee_shm_get_va(shm, 0);
+ /*
+ * If va2pa fails for some reason, we can't call
+ * optee_close_session(), only free the memory. Secure OS
+ * will leak sessions and finally refuse more sessions, but
+ * we will at least let normal world reclaim its memory.
+ */
+ if (!IS_ERR(arg))
+ tee_shm_va2pa(shm, arg, &parg);
+ }
+
+ list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
+ list_node) {
+ list_del(&sess->list_node);
+ if (!IS_ERR_OR_NULL(arg)) {
+ memset(arg, 0, sizeof(*arg));
+ arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ arg->session = sess->session_id;
+ optee_do_call_with_arg(ctx, parg);
+ }
+ kfree(sess);
+ }
+ kfree(ctxdata);
+
+ if (!IS_ERR(shm))
+ tee_shm_free(shm);
+
+ ctx->data = NULL;
+
+ if (teedev == optee->supp_teedev) {
+ mutex_lock(&optee->supp.ctx_mutex);
+ optee->supp.ctx = NULL;
+ mutex_unlock(&optee->supp.ctx_mutex);
+ }
+}
+
+static struct tee_driver_ops optee_ops = {
+ .get_version = optee_get_version,
+ .open = optee_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+};
+
+static struct tee_desc optee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &optee_ops,
+ .owner = THIS_MODULE,
+};
+
+static struct tee_driver_ops optee_supp_ops = {
+ .get_version = optee_get_version,
+ .open = optee_open,
+ .release = optee_release,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+};
+
+static struct tee_desc optee_supp_desc = {
+ .name = DRIVER_NAME "-supp",
+ .ops = &optee_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+ return true;
+ return false;
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_calls_revision_result result;
+ } res;
+
+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+ return true;
+ return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+ u32 *sec_caps)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_exchange_capabilities_result result;
+ } res;
+ u32 a1 = 0;
+
+ /*
+ * TODO This isn't enough to tell if it's UP system (from kernel
+ * point of view) or not, is_smp() returns the information
+ * needed, but can't be called directly from here.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return false;
+
+ *sec_caps = res.result.capabilities;
+ return true;
+}
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_shm_config_result result;
+ } res;
+ struct tee_shm_pool *pool;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ struct tee_shm_pool_mem_info priv_info;
+ struct tee_shm_pool_mem_info dmabuf_info;
+
+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
+ pr_info("shm service not available\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+ pr_err("only normal cached shared memory supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ begin = roundup(res.result.start, PAGE_SIZE);
+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+
+ if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
+ pr_err("too small shared memory area\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ va = memremap(paddr, size, MEMREMAP_WB);
+ if (!va) {
+ pr_err("shared memory ioremap failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+
+ priv_info.vaddr = vaddr;
+ priv_info.paddr = paddr;
+ priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+ dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+ dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+ dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+ pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
+ if (IS_ERR(pool)) {
+ memunmap(va);
+ goto out;
+ }
+
+ *memremaped_shm = va;
+out:
+ return pool;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device_node *np)
+{
+ const char *method;
+
+ pr_info("probing for conduit method from DT.\n");
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp("hvc", method))
+ return optee_smccc_hvc;
+ else if (!strcmp("smc", method))
+ return optee_smccc_smc;
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return ERR_PTR(-EINVAL);
+}
+
+static struct optee *optee_probe(struct device_node *np)
+{
+ optee_invoke_fn *invoke_fn;
+ struct tee_shm_pool *pool;
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ struct tee_device *teedev;
+ u32 sec_caps;
+ int rc;
+
+ invoke_fn = get_invoke_func(np);
+ if (IS_ERR(invoke_fn))
+ return (void *)invoke_fn;
+
+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+ pr_warn("api uid mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+ pr_warn("api revision mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+ pr_warn("capabilities mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * We have no other option for shared memory, if secure world
+ * doesn't have any reserved memory we can use we can't continue.
+ */
+ if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ return ERR_PTR(-EINVAL);
+
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+ if (IS_ERR(pool))
+ return (void *)pool;
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ optee->invoke_fn = invoke_fn;
+
+ teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err;
+
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_wait_queue_init(&optee->wait_queue);
+ optee_supp_init(&optee->supp);
+ optee->memremaped_shm = memremaped_shm;
+ optee->pool = pool;
+
+ optee_enable_shm_cache(optee);
+
+ pr_info("initialized driver\n");
+ return optee;
+err:
+ if (optee) {
+ /*
+ * tee_device_unregister() is safe to call even if the
+ * devices haven't been registered with
+ * tee_device_register() yet.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+ kfree(optee);
+ }
+ if (pool)
+ tee_shm_pool_free(pool);
+ if (memremaped_shm)
+ memunmap(memremaped_shm);
+ return ERR_PTR(rc);
+}
+
+static void optee_remove(struct optee *optee)
+{
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ optee_disable_shm_cache(optee);
+
+ /*
+ * The two devices have to be unregistered before we can free the
+ * other resources.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+
+ tee_shm_pool_free(optee->pool);
+ if (optee->memremaped_shm)
+ memunmap(optee->memremaped_shm);
+ optee_wait_queue_exit(&optee->wait_queue);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+
+ kfree(optee);
+}
+
+static const struct of_device_id optee_match[] = {
+ { .compatible = "linaro,optee-tz" },
+ {},
+};
+
+static struct optee *optee_svc;
+
+static int __init optee_driver_init(void)
+{
+ struct device_node *fw_np;
+ struct device_node *np;
+ struct optee *optee;
+
+ /* Node is supposed to be below /firmware */
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return -ENODEV;
+
+ np = of_find_matching_node(fw_np, optee_match);
+ of_node_put(fw_np);
+ if (!np)
+ return -ENODEV;
+
+ optee = optee_probe(np);
+ of_node_put(np);
+
+ if (IS_ERR(optee))
+ return PTR_ERR(optee);
+
+ optee_svc = optee;
+
+ return 0;
+}
+module_init(optee_driver_init);
+
+static void __exit optee_driver_exit(void)
+{
+ struct optee *optee = optee_svc;
+
+ optee_svc = NULL;
+ if (optee)
+ optee_remove(optee);
+}
+module_exit(optee_driver_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("OP-TEE driver");
+MODULE_SUPPORTED_DEVICE("");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
new file mode 100644
index 000000000000..dd7a06ee0462
--- /dev/null
+++ b/drivers/tee/optee/optee_msg.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _OPTEE_MSG_H
+#define _OPTEE_MSG_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/*
+ * This file defines the OP-TEE message protocol used to communicate
+ * with an instance of OP-TEE running in secure world.
+ *
+ * This file is divided into three sections.
+ * 1. Formatting of messages.
+ * 2. Requests from normal world
+ * 3. Requests from secure world, Remote Procedure Call (RPC), handled by
+ * tee-supplicant.
+ */
+
+/*****************************************************************************
+ * Part 1 - formatting of messages
+ *****************************************************************************/
+
+#define OPTEE_MSG_ATTR_TYPE_NONE 0x0
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1
+#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
+#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
+#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
+
+#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0)
+
+/*
+ * Meta parameter to be absorbed by the Secure OS and not passed
+ * to the Trusted Application.
+ *
+ * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
+ */
+#define OPTEE_MSG_ATTR_META BIT(8)
+
+/*
+ * The temporary shared memory object is not physically contiguous and this
+ * temp memref is followed by another fragment until the last temp memref
+ * that doesn't have this bit set.
+ */
+#define OPTEE_MSG_ATTR_FRAGMENT BIT(9)
+
+/*
+ * Memory attributes for caching passed with temp memrefs. The actual value
+ * used is defined outside the message protocol with the exception of
+ * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
+ * defined for the memory range should be used. If optee_smc.h is used as
+ * bearer of this protocol OPTEE_SMC_SHM_* is used for values.
+ */
+#define OPTEE_MSG_ATTR_CACHE_SHIFT 16
+#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0)
+#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0
+
+/*
+ * Same values as TEE_LOGIN_* from TEE Internal API
+ */
+#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000
+#define OPTEE_MSG_LOGIN_USER 0x00000001
+#define OPTEE_MSG_LOGIN_GROUP 0x00000002
+#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004
+#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
+#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
+
+/**
+ * struct optee_msg_param_tmem - temporary memory reference parameter
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm
+ *
+ * Secure and normal world communicates pointers as physical address
+ * instead of the virtual address. This is because secure and normal world
+ * have completely independent memory mapping. Normal world can even have a
+ * hypervisor which need to translate the guest physical address (AKA IPA
+ * in ARM documentation) to a real physical address before passing the
+ * structure to secure world.
+ */
+struct optee_msg_param_tmem {
+ u64 buf_ptr;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_rmem - registered memory reference parameter
+ * @offs: Offset into shared memory reference
+ * @size: Size of the buffer
+ * @shm_ref: Shared memory reference, pointer to a struct tee_shm
+ */
+struct optee_msg_param_rmem {
+ u64 offs;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_value - opaque value parameter
+ *
+ * Value parameters are passed unchecked between normal and secure world.
+ */
+struct optee_msg_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+/**
+ * struct optee_msg_param - parameter used together with struct optee_msg_arg
+ * @attr: attributes
+ * @tmem: parameter by temporary memory reference
+ * @rmem: parameter by registered memory reference
+ * @value: parameter by opaque value
+ *
+ * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
+ */
+struct optee_msg_param {
+ u64 attr;
+ union {
+ struct optee_msg_param_tmem tmem;
+ struct optee_msg_param_rmem rmem;
+ struct optee_msg_param_value value;
+ } u;
+};
+
+/**
+ * struct optee_msg_arg - call argument
+ * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
+ * @func: Trusted Application function, specific to the Trusted Application,
+ * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
+ * @session: In parameter for all OPTEE_MSG_CMD_* except
+ * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
+ * @cancel_id: Cancellation id, a unique value to identify this request
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * All normal calls to the Trusted OS use this struct. If cmd requires
+ * further information than what these fields hold it can be passed as a
+ * parameter tagged as meta (setting the OPTEE_MSG_ATTR_META bit in the
+ * corresponding attrs field). All parameters tagged as meta have to come
+ * first.
+ *
+ * Temp memref parameters can be fragmented if supported by the Trusted OS
+ * (when optee_smc.h is the bearer of this protocol this is indicated with
+ * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is
+ * fragmented then all but the last fragment have the
+ * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented
+ * it will still be presented as a single logical memref to the Trusted
+ * Application.
+ */
+struct optee_msg_arg {
+ u32 cmd;
+ u32 func;
+ u32 session;
+ u32 cancel_id;
+ u32 pad;
+ u32 ret;
+ u32 ret_origin;
+ u32 num_params;
+
+ /* num_params tells the actual number of element in params */
+ struct optee_msg_param params[0];
+};
+
+/**
+ * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
+ *
+ * @num_params: Number of parameters embedded in the struct optee_msg_arg
+ *
+ * Returns the size of the struct optee_msg_arg together with the number
+ * of embedded parameters.
+ */
+#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
+ (sizeof(struct optee_msg_arg) + \
+ sizeof(struct optee_msg_param) * (num_params))
+
+/*****************************************************************************
+ * Part 2 - requests from normal world
+ *****************************************************************************/
+
+/*
+ * Return the following UID if using API specified in this file without
+ * further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
+ * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
+ */
+#define OPTEE_MSG_UID_0 0x384fb3e0
+#define OPTEE_MSG_UID_1 0xe7f811e3
+#define OPTEE_MSG_UID_2 0xaf630002
+#define OPTEE_MSG_UID_3 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01
+
+/*
+ * Returns 2.0 if using API specified in this file without further
+ * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
+ * and OPTEE_MSG_REVISION_MINOR
+ */
+#define OPTEE_MSG_REVISION_MAJOR 2
+#define OPTEE_MSG_REVISION_MINOR 0
+#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in 4 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_UID described above.
+ */
+#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0
+#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3
+#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002
+#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in 2 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_REVISION described above.
+ */
+#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
+
+/*
+ * Do a secure call with struct optee_msg_arg as argument
+ * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
+ *
+ * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
+ * The first two parameters are tagged as meta, holding two value
+ * parameters to pass the following information:
+ * param[0].u.value.a-b uuid of Trusted Application
+ * param[1].u.value.a-b uuid of Client
+ * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
+ *
+ * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened
+ * session to a Trusted Application. struct optee_msg_arg::func is Trusted
+ * Application function, specific to the Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
+ * Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
+ *
+ * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
+ * information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [| OPTEE_MSG_ATTR_FRAGMENT]
+ * [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
+ * [in] param[0].u.tmem.size size (of first fragment)
+ * [in] param[0].u.tmem.shm_ref holds shared memory reference
+ * ...
+ * The shared memory can optionally be fragmented, temp memrefs can follow
+ * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set.
+ *
+ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
+ * memory reference. The information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+ * [in] param[0].u.rmem.shm_ref holds shared memory reference
+ * [in] param[0].u.rmem.offs 0
+ * [in] param[0].u.rmem.size 0
+ */
+#define OPTEE_MSG_CMD_OPEN_SESSION 0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
+#define OPTEE_MSG_CMD_CLOSE_SESSION 2
+#define OPTEE_MSG_CMD_CANCEL 3
+#define OPTEE_MSG_CMD_REGISTER_SHM 4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
+
+/*****************************************************************************
+ * Part 3 - Requests from secure world, RPC
+ *****************************************************************************/
+
+/*
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
+ * struct optee_msg_arg::cmd holds values defined by OPTEE_MSG_RPC_CMD_* below
+ *
+ * RPC communication with tee-supplicant is reversed compared to normal
+ * client communication described above. The supplicant receives requests
+ * and sends responses.
+ */
+
+/*
+ * Load a TA into memory, defined in tee-supplicant
+ */
+#define OPTEE_MSG_RPC_CMD_LOAD_TA 0
+
+/*
+ * Reserved
+ */
+#define OPTEE_MSG_RPC_CMD_RPMB 1
+
+/*
+ * File system access, defined in tee-supplicant
+ */
+#define OPTEE_MSG_RPC_CMD_FS 2
+
+/*
+ * Get time
+ *
+ * Returns number of seconds and nano seconds since the Epoch,
+ * 1970-01-01 00:00:00 +0000 (UTC).
+ *
+ * [out] param[0].u.value.a Number of seconds
+ * [out] param[0].u.value.b Number of nano seconds.
+ */
+#define OPTEE_MSG_RPC_CMD_GET_TIME 3
+
+/*
+ * Wait queue primitive, helper for secure world to implement a wait queue.
+ *
+ * If secure world needs to wait for a secure world mutex it issues a sleep
+ * request instead of spinning in secure world. Conversely, a wakeup
+ * request is issued when a secure world mutex with a waiting thread is
+ * unlocked.
+ *
+ * Waiting on a key
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP
+ * [in] param[0].u.value.b wait key
+ *
+ * Waking up a key
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP
+ * [in] param[0].u.value.b wakeup key
+ */
+#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4
+#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0
+#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1
+
+/*
+ * Suspend execution
+ *
+ * [in] param[0].value .a number of milliseconds to suspend
+ */
+#define OPTEE_MSG_RPC_CMD_SUSPEND 5
+
+/*
+ * Allocate a piece of shared memory
+ *
+ * Shared memory can optionally be fragmented, to support that additional
+ * spare param entries are allocated to make room for eventual fragments.
+ * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when
+ * unused. All returned temp memrefs except the last should have the
+ * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field.
+ *
+ * [in] param[0].u.value.a type of memory one of
+ * OPTEE_MSG_RPC_SHM_TYPE_* below
+ * [in] param[0].u.value.b requested size
+ * [in] param[0].u.value.c required alignment
+ *
+ * [out] param[0].u.tmem.buf_ptr physical address (of first fragment)
+ * [out] param[0].u.tmem.size size (of first fragment)
+ * [out] param[0].u.tmem.shm_ref shared memory reference
+ * ...
+ * [out] param[n].u.tmem.buf_ptr physical address
+ * [out] param[n].u.tmem.size size
+ * [out] param[n].u.tmem.shm_ref shared memory reference (same value
+ * as in param[n-1].u.tmem.shm_ref)
+ */
+#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6
+/* Memory that can be shared with a non-secure user space application */
+#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0
+/* Memory only shared with non-secure kernel */
+#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1
+
+/*
+ * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC
+ *
+ * [in] param[0].u.value.a type of memory one of
+ * OPTEE_MSG_RPC_SHM_TYPE_* above
+ * [in] param[0].u.value.b value of shared memory reference
+ * returned in param[0].u.tmem.shm_ref
+ * above
+ */
+#define OPTEE_MSG_RPC_CMD_SHM_FREE 7
+
+#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
new file mode 100644
index 000000000000..c374cd594314
--- /dev/null
+++ b/drivers/tee/optee/optee_private.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef OPTEE_PRIVATE_H
+#define OPTEE_PRIVATE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/semaphore.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_msg.h"
+
+#define OPTEE_MAX_ARG_SIZE 1024
+
+/* Some Global Platform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ struct arm_smccc_res *);
+
+struct optee_call_queue {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head waiters;
+};
+
+struct optee_wait_queue {
+ /* Serializes access to this struct */
+ struct mutex mu;
+ struct list_head db;
+};
+
+/**
+ * struct optee_supp - supplicant synchronization struct
+ * @ctx:	the context of the currently connected supplicant;
+ *		if !NULL the supplicant device is available for use,
+ *		else busy
+ * @ctx_mutex: held while accessing @ctx
+ * @func: supplicant function id to call
+ * @ret: call return value
+ * @num_params: number of elements in @param
+ * @param: parameters for @func
+ * @req_posted: if true, a request has been posted to the supplicant
+ * @supp_next_send: if true, next step is for supplicant to send response
+ * @thrd_mutex: held by the thread doing a request to supplicant
+ * @supp_mutex: held by supplicant while operating on this struct
+ * @data_to_supp: supplicant is waiting on this for next request
+ * @data_from_supp: requesting thread is waiting on this to get the result
+ */
+struct optee_supp {
+ struct tee_context *ctx;
+ /* Serializes access of ctx */
+ struct mutex ctx_mutex;
+
+ u32 func;
+ u32 ret;
+ size_t num_params;
+ struct tee_param *param;
+
+ bool req_posted;
+ bool supp_next_send;
+ /* Serializes access to this struct for requesting thread */
+ struct mutex thrd_mutex;
+ /* Serializes access to this struct for supplicant threads */
+ struct mutex supp_mutex;
+ struct completion data_to_supp;
+ struct completion data_from_supp;
+};
+
+/**
+ * struct optee - main service struct
+ * @supp_teedev: supplicant device
+ * @teedev: client device
+ * @invoke_fn: function to issue smc or hvc
+ * @call_queue: queue of threads waiting to call @invoke_fn
+ * @wait_queue: queue of threads from secure world waiting for a
+ * secure world sync object
+ * @supp: supplicant synchronization struct for RPC to supplicant
+ * @pool: shared memory pool
+ * @memremaped_shm:	virtual address of memory in shared memory pool
+ */
+struct optee {
+ struct tee_device *supp_teedev;
+ struct tee_device *teedev;
+ optee_invoke_fn *invoke_fn;
+ struct optee_call_queue call_queue;
+ struct optee_wait_queue wait_queue;
+ struct optee_supp supp;
+ struct tee_shm_pool *pool;
+ void *memremaped_shm;
+};
+
+struct optee_session {
+ struct list_head list_node;
+ u32 session_id;
+};
+
+struct optee_context_data {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head sess_list;
+};
+
+struct optee_rpc_param {
+ u32 a0;
+ u32 a1;
+ u32 a2;
+ u32 a3;
+ u32 a4;
+ u32 a5;
+ u32 a6;
+ u32 a7;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+
+void optee_wait_queue_init(struct optee_wait_queue *wq);
+void optee_wait_queue_exit(struct optee_wait_queue *wq);
+
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct tee_param *param);
+
+int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
+int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
+void optee_supp_init(struct optee_supp *supp);
+void optee_supp_uninit(struct optee_supp *supp);
+
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+
+u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+int optee_close_session(struct tee_context *ctx, u32 session);
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+void optee_enable_shm_cache(struct optee *optee);
+void optee_disable_shm_cache(struct optee *optee);
+
+int optee_from_msg_param(struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params);
+int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
+ const struct tee_param *params);
+
+/*
+ * Small helpers
+ */
+
+/*
+ * Combine two 32-bit register values into a pointer; reg0 holds the
+ * upper 32 bits and reg1 the lower 32 bits.
+ */
+static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
+{
+	return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
+}
+
+/*
+ * Split a 64-bit value into two 32-bit register values; reg0 receives
+ * the upper 32 bits and reg1 the lower 32 bits.
+ */
+static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
+{
+	*reg0 = val >> 32;
+	*reg1 = val;
+}
+
+#endif /*OPTEE_PRIVATE_H*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
new file mode 100644
index 000000000000..13b7c98cdf25
--- /dev/null
+++ b/drivers/tee/optee/optee_smc.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef OPTEE_SMC_H
+#define OPTEE_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+#define OPTEE_SMC_STD_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+
+/*
+ * Function specified by SMC Calling convention.
+ *
+ * Uses the ARM_SMCCC_* identifiers from linux/arm-smccc.h, matching the
+ * OPTEE_SMC_CALLS_UID/OPTEE_SMC_CALLS_REVISION definitions below.
+ */
+#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00
+#define OPTEE_SMC_CALLS_COUNT \
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+			   ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+			   OPTEE_SMC_FUNCID_CALLS_COUNT)
+
+/*
+ * Normal cached memory (write-back), shareable for SMP systems and not
+ * shareable for UP systems.
+ */
+#define OPTEE_SMC_SHM_CACHED 1
+
+/*
+ * a0..a7 is used as register names in the descriptions below, on arm32
+ * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's
+ * 32-bit registers.
+ */
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return one of the following UIDs if using API specified in this file
+ * without further extensions:
+ * 65cb6b93-af0c-4617-8ed6-644a8d1140f8
+ * see also OPTEE_SMC_UID_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
+#define OPTEE_SMC_CALLS_UID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 2.0 if using API specified in this file without further extensions.
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
+#define OPTEE_SMC_CALLS_REVISION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_REVISION)
+
+struct optee_smc_calls_revision_result {
+ unsigned long major;
+ unsigned long minor;
+ unsigned long reserved0;
+ unsigned long reserved1;
+};
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
+#define OPTEE_SMC_CALL_GET_OS_UUID \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
+#define OPTEE_SMC_CALL_GET_OS_REVISION \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
+
+/*
+ * Call with struct optee_msg_arg as argument
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
+ * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg
+ * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg
+ * a3 Cache settings, not used if physical pointer is in a predefined shared
+ * memory area else per OPTEE_SMC_SHM_*
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_*
+ * a1-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * a1-3 Preserved
+ * a4-7 Preserved
+ *
+ * RPC return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
+ * a1-2 RPC parameters
+ * a3-7 Resume information, must be preserved
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Call completed, result updated in
+ * the previously supplied struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
+ * try again later.
+ * OPTEE_SMC_RETURN_EBADADDR	Bad physical pointer to struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
+ * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ */
+#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
+#define OPTEE_SMC_CALL_WITH_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of SHM
+ * a2 Size of of SHM
+ * a3 Cache settings of memory, as defined by the
+ * OPTEE_SMC_SHM_* values above
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
+#define OPTEE_SMC_GET_SHM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+
+struct optee_smc_get_shm_config_result {
+ unsigned long status;
+ unsigned long start;
+ unsigned long size;
+ unsigned long settings;
+};
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+struct optee_smc_exchange_capabilities_result {
+ unsigned long status;
+ unsigned long capabilities;
+ unsigned long reserved0;
+ unsigned long reserved1;
+};
+
+/*
+ * Disable and empties cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Upper 32bit of a 64bit Shared memory cookie
+ * a2 Lower 32bit of a 64bit Shared memory cookie
+ * a3-7 Preserved
+ *
+ * Cache empty return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+struct optee_smc_disable_shm_cache_result {
+ unsigned long status;
+ unsigned long shm_upper32;
+ unsigned long shm_lower32;
+ unsigned long reserved0;
+};
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ * OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Original call completed, result
+ * updated in the previously supplied.
+ * struct optee_msg_arg
+ * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
+ * world.
+ * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+ ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
+
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
+ * a1 Size in bytes of required argument memory
+ * a2 Not used
+ * a3 Resume information, must be preserved
+ * a4-5 Not used
+ * a6-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1 Upper 32bits of 64bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated.
+ * a2 Lower 32bits of 64bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated
+ * a3 Preserved
+ * a4 Upper 32bits of 64bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a5 Lower 32bits of 64bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a6-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_ALLOC 0
+#define OPTEE_SMC_RETURN_RPC_ALLOC \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+
+/*
+ * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_FREE
+ * a1 Upper 32bits of 64bit shared memory cookie belonging to this
+ * argument memory
+ * a2 Lower 32bits of 64bit shared memory cookie belonging to this
+ * argument memory
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FREE 2
+#define OPTEE_SMC_RETURN_RPC_FREE \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
+
+/*
+ * Deliver an IRQ in normal world.
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_IRQ
+ * a1-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_IRQ 4
+#define OPTEE_SMC_RETURN_RPC_IRQ \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)
+
+/*
+ * Do an RPC request. The supplied struct optee_msg_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd the Request ID
+ * - ret return value of the request, filled in by normal world
+ * - num_params number of parameters for the request
+ * - params the parameters
+ * - param_attrs attributes of the parameters
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_CMD
+ * a1 Upper 32bit of a 64bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a2 Lower 32bit of a 64bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD 5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK 0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
+#define OPTEE_SMC_RETURN_EBUSY 0x2
+#define OPTEE_SMC_RETURN_ERESUME 0x3
+#define OPTEE_SMC_RETURN_EBADADDR 0x4
+#define OPTEE_SMC_RETURN_EBADCMD 0x5
+#define OPTEE_SMC_RETURN_ENOMEM 0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret)	__optee_smc_return_is_rpc((ret))
+
+/*
+ * An RPC return code carries OPTEE_SMC_RETURN_RPC_PREFIX in its upper 16
+ * bits. 0xFFFFFFFF (OPTEE_SMC_RETURN_UNKNOWN_FUNCTION) would also match
+ * that prefix, so it is excluded explicitly.
+ */
+static inline bool __optee_smc_return_is_rpc(u32 ret)
+{
+	return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION &&
+	       (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) ==
+			OPTEE_SMC_RETURN_RPC_PREFIX;
+}
+
+#endif /* OPTEE_SMC_H */
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
new file mode 100644
index 000000000000..8814eca06021
--- /dev/null
+++ b/drivers/tee/optee/rpc.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+/* One waiter in the secure-world wait queue, identified by a 32-bit key. */
+struct wq_entry {
+	struct list_head link;	/* entry in optee_wait_queue::db */
+	struct completion c;	/* completed by wq_wakeup() */
+	u32 key;		/* key supplied by secure world */
+};
+
+/* Initialize the mutex and waiter list backing an OP-TEE wait queue. */
+void optee_wait_queue_init(struct optee_wait_queue *priv)
+{
+	mutex_init(&priv->mu);
+	INIT_LIST_HEAD(&priv->db);
+}
+
+/* Tear down a wait queue; callers must ensure no waiters remain queued. */
+void optee_wait_queue_exit(struct optee_wait_queue *priv)
+{
+	mutex_destroy(&priv->mu);
+}
+
+/*
+ * RPC: report current wall-clock time to secure world.
+ *
+ * Expects exactly one VALUE_OUTPUT parameter; fills value.a with seconds
+ * and value.b with nanoseconds. arg->ret is set to TEEC_SUCCESS or
+ * TEEC_ERROR_BAD_PARAMETERS.
+ */
+static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
+{
+	struct timespec64 ts;
+
+	if (arg->num_params != 1)
+		goto bad;
+	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+			OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
+		goto bad;
+
+	getnstimeofday64(&ts);
+	arg->params[0].u.value.a = ts.tv_sec;
+	arg->params[0].u.value.b = ts.tv_nsec;
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+/*
+ * Look up the wait-queue entry for @key, creating one if it doesn't exist
+ * yet. Runs under wq->mu. Returns NULL only if allocation fails.
+ */
+static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
+{
+	struct wq_entry *w;
+
+	mutex_lock(&wq->mu);
+
+	list_for_each_entry(w, &wq->db, link)
+		if (w->key == key)
+			goto out;
+
+	/* Not found: whoever of sleep/wakeup runs first creates the entry */
+	w = kmalloc(sizeof(*w), GFP_KERNEL);
+	if (w) {
+		init_completion(&w->c);
+		w->key = key;
+		list_add_tail(&w->link, &wq->db);
+	}
+out:
+	mutex_unlock(&wq->mu);
+	return w;
+}
+
+/*
+ * Block until wq_wakeup() completes the entry for @key, then unlink and
+ * free it. The sleeper always owns teardown of the entry.
+ */
+static void wq_sleep(struct optee_wait_queue *wq, u32 key)
+{
+	struct wq_entry *w = wq_entry_get(wq, key);
+
+	if (w) {
+		wait_for_completion(&w->c);
+		mutex_lock(&wq->mu);
+		list_del(&w->link);
+		mutex_unlock(&wq->mu);
+		kfree(w);
+	}
+}
+
+/*
+ * Complete the entry for @key, releasing a sleeper in wq_sleep(). If no
+ * sleeper has arrived yet the entry is created here and the completion is
+ * recorded for the sleeper to consume later.
+ */
+static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
+{
+	struct wq_entry *w = wq_entry_get(wq, key);
+
+	if (w)
+		complete(&w->c);
+}
+
+/*
+ * RPC: wait-queue sleep/wakeup requested by secure world.
+ *
+ * Expects one VALUE_INPUT parameter with the operation in value.a
+ * (SLEEP or WAKEUP) and the wait-queue key in value.b.
+ */
+static void handle_rpc_func_cmd_wq(struct optee *optee,
+				   struct optee_msg_arg *arg)
+{
+	if (arg->num_params != 1)
+		goto bad;
+
+	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+		goto bad;
+
+	switch (arg->params[0].u.value.a) {
+	case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
+		wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
+		break;
+	case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
+		wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
+		break;
+	default:
+		goto bad;
+	}
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+/*
+ * RPC: suspend the calling thread for the number of milliseconds given in
+ * the single VALUE_INPUT parameter (value.a).
+ */
+static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
+{
+	u32 msec_to_wait;
+
+	if (arg->num_params != 1)
+		goto bad;
+
+	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+		goto bad;
+
+	msec_to_wait = arg->params[0].u.value.a;
+
+	/*
+	 * msleep() sets the task state itself, so the previous explicit
+	 * set_current_state(TASK_INTERRUPTIBLE) was immediately overwritten
+	 * and has been dropped.
+	 */
+	msleep(msec_to_wait);
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+/*
+ * Forward an RPC command to the user-space tee-supplicant.
+ *
+ * Converts the message parameters to struct tee_param, blocks in
+ * optee_supp_thrd_req() until the supplicant answers, then converts the
+ * (possibly updated) parameters back into the message.
+ */
+static void handle_rpc_supp_cmd(struct tee_context *ctx,
+				struct optee_msg_arg *arg)
+{
+	struct tee_param *params;
+
+	arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+			       GFP_KERNEL);
+	if (!params) {
+		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+		return;
+	}
+
+	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		goto out;
+	}
+
+	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
+
+	/* Copy output/in-out values back regardless of the request result */
+	if (optee_to_msg_param(arg->params, arg->num_params, params))
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+out:
+	kfree(params);
+}
+
+/*
+ * Ask the supplicant to allocate @sz bytes of shared memory.
+ *
+ * The supplicant returns the shm id in value.c; the returned struct
+ * tee_shm carries an extra reference matched by tee_shm_put() in
+ * cmd_free_suppl(). Returns ERR_PTR(-ENOMEM) on failure.
+ */
+static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
+{
+	u32 ret;
+	struct tee_param param;
+	struct optee *optee = tee_get_drvdata(ctx->teedev);
+	struct tee_shm *shm;
+
+	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+	param.u.value.b = sz;
+	param.u.value.c = 0;
+
+	ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
+	if (ret)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&optee->supp.ctx_mutex);
+	/* Increases count as secure world doesn't have a reference */
+	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
+	mutex_unlock(&optee->supp.ctx_mutex);
+	return shm;
+}
+
+/*
+ * RPC: allocate shared memory for secure world.
+ *
+ * params[0] must be VALUE_INPUT with the pool type in value.a (APPL =
+ * via supplicant, KERNEL = driver-allocated) and the size in value.b;
+ * any further parameters must be NONE. On success params[0] is rewritten
+ * as TMEM_OUTPUT carrying the physical address, size and a shm cookie
+ * (the struct tee_shm pointer) used by later FREE requests.
+ */
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+					  struct optee_msg_arg *arg)
+{
+	phys_addr_t pa;
+	struct tee_shm *shm;
+	size_t sz;
+	size_t n;
+
+	arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+	if (!arg->num_params ||
+	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+
+	for (n = 1; n < arg->num_params; n++) {
+		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+			return;
+		}
+	}
+
+	sz = arg->params[0].u.value.b;
+	switch (arg->params[0].u.value.a) {
+	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+		shm = cmd_alloc_suppl(ctx, sz);
+		break;
+	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
+		break;
+	default:
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+
+	if (IS_ERR(shm)) {
+		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+		return;
+	}
+
+	if (tee_shm_get_pa(shm, 0, &pa)) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		goto bad;
+	}
+
+	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+	arg->params[0].u.tmem.buf_ptr = pa;
+	arg->params[0].u.tmem.size = sz;
+	arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	tee_shm_free(shm);
+}
+
+/*
+ * Ask the supplicant to free shared memory previously allocated by
+ * cmd_alloc_suppl(); also drops the reference that function took.
+ */
+static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
+{
+	struct tee_param param;
+
+	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+	param.u.value.b = tee_shm_get_id(shm);
+	param.u.value.c = 0;
+
+	/*
+	 * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
+	 * world has released its reference.
+	 *
+	 * It's better to do this before sending the request to supplicant
+	 * as we'd like to let the process doing the initial allocation to
+	 * do release the last reference too in order to avoid stacking
+	 * many pending fput() on the client process. This could otherwise
+	 * happen if secure world does many allocate and free in a single
+	 * invoke.
+	 */
+	tee_shm_put(shm);
+
+	optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
+}
+
+/*
+ * RPC: free shared memory previously handed to secure world.
+ *
+ * params[0] must be VALUE_INPUT with the pool type in value.a and the shm
+ * cookie (struct tee_shm pointer from the matching alloc) in value.b.
+ */
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+					 struct optee_msg_arg *arg)
+{
+	struct tee_shm *shm;
+
+	arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+	if (arg->num_params != 1 ||
+	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+
+	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+	switch (arg->params[0].u.value.a) {
+	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+		cmd_free_suppl(ctx, shm);
+		break;
+	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+		tee_shm_free(shm);
+		break;
+	default:
+		/*
+		 * Return here so the error code isn't clobbered by the
+		 * unconditional TEEC_SUCCESS below (the original fell
+		 * through and always reported success).
+		 */
+		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+		return;
+	}
+	arg->ret = TEEC_SUCCESS;
+}
+
+/*
+ * Dispatch one RPC command from secure world.
+ *
+ * @shm holds a struct optee_msg_arg describing the request; commands not
+ * handled in-kernel are forwarded to the user-space supplicant.
+ */
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+				struct tee_shm *shm)
+{
+	struct optee_msg_arg *arg;
+
+	arg = tee_shm_get_va(shm, 0);
+	if (IS_ERR(arg)) {
+		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
+		return;
+	}
+
+	switch (arg->cmd) {
+	case OPTEE_MSG_RPC_CMD_GET_TIME:
+		handle_rpc_func_cmd_get_time(arg);
+		break;
+	case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
+		handle_rpc_func_cmd_wq(optee, arg);
+		break;
+	case OPTEE_MSG_RPC_CMD_SUSPEND:
+		handle_rpc_func_cmd_wait(arg);
+		break;
+	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+		handle_rpc_func_cmd_shm_alloc(ctx, arg);
+		break;
+	case OPTEE_MSG_RPC_CMD_SHM_FREE:
+		handle_rpc_func_cmd_shm_free(ctx, arg);
+		break;
+	default:
+		/* Unknown commands are the supplicant's problem */
+		handle_rpc_supp_cmd(ctx, arg);
+	}
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx:	context doing the RPC
+ * @param:	value of registers for the RPC
+ *
+ * Result of RPC is written back into @param, and a0 is set so the caller
+ * re-enters secure world with OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ */
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
+{
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+	struct tee_shm *shm;
+	phys_addr_t pa;
+
+	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+	case OPTEE_SMC_RPC_FUNC_ALLOC:
+		shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
+		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+			reg_pair_from_64(&param->a1, &param->a2, pa);
+			reg_pair_from_64(&param->a4, &param->a5,
+					 (unsigned long)shm);
+		} else {
+			/* a1 == a2 == 0 signals allocation failure */
+			param->a1 = 0;
+			param->a2 = 0;
+			param->a4 = 0;
+			param->a5 = 0;
+		}
+		break;
+	case OPTEE_SMC_RPC_FUNC_FREE:
+		shm = reg_pair_to_ptr(param->a1, param->a2);
+		tee_shm_free(shm);
+		break;
+	case OPTEE_SMC_RPC_FUNC_IRQ:
+		/*
+		 * An IRQ was raised while secure world was executing,
+		 * since all IRQs are handled in Linux a dummy RPC is
+		 * performed to let Linux take the IRQ through the normal
+		 * vector.
+		 */
+		break;
+	case OPTEE_SMC_RPC_FUNC_CMD:
+		shm = reg_pair_to_ptr(param->a1, param->a2);
+		handle_rpc_func_cmd(ctx, optee, shm);
+		break;
+	default:
+		pr_warn("Unknown RPC func 0x%x\n",
+			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+		break;
+	}
+
+	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
new file mode 100644
index 000000000000..b4ea0678a436
--- /dev/null
+++ b/drivers/tee/optee/supp.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+
+/* Initialize the supplicant state: mutexes and request/answer completions. */
+void optee_supp_init(struct optee_supp *supp)
+{
+	memset(supp, 0, sizeof(*supp));
+	mutex_init(&supp->ctx_mutex);
+	mutex_init(&supp->thrd_mutex);
+	mutex_init(&supp->supp_mutex);
+	init_completion(&supp->data_to_supp);
+	init_completion(&supp->data_from_supp);
+}
+
+/* Destroy the supplicant state set up by optee_supp_init(). */
+void optee_supp_uninit(struct optee_supp *supp)
+{
+	mutex_destroy(&supp->ctx_mutex);
+	mutex_destroy(&supp->thrd_mutex);
+	mutex_destroy(&supp->supp_mutex);
+}
+
+/**
+ * optee_supp_thrd_req() - request service from supplicant
+ * @ctx:	context doing the request
+ * @func:	function requested
+ * @num_params:	number of elements in @param array
+ * @param:	parameters for function
+ *
+ * Blocks until the supplicant has answered (or until no supplicant is
+ * available). Returns result of operation to be passed to secure world.
+ */
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+			struct tee_param *param)
+{
+	bool interruptable;
+	struct optee *optee = tee_get_drvdata(ctx->teedev);
+	struct optee_supp *supp = &optee->supp;
+	u32 ret;
+
+	/*
+	 * Other threads block here until we've copied our answer from
+	 * supplicant.
+	 */
+	while (mutex_lock_interruptible(&supp->thrd_mutex)) {
+		/* See comment below on when the RPC can be interrupted. */
+		mutex_lock(&supp->ctx_mutex);
+		interruptable = !supp->ctx;
+		mutex_unlock(&supp->ctx_mutex);
+		if (interruptable)
+			return TEEC_ERROR_COMMUNICATION;
+	}
+
+	/*
+	 * We have exclusive access now since the supplicant at this
+	 * point is either doing a
+	 * wait_for_completion_interruptible(&supp->data_to_supp) or is in
+	 * userspace still about to do the ioctl() to enter
+	 * optee_supp_recv() below.
+	 */
+
+	/* Publish the request for the supplicant to pick up */
+	supp->func = func;
+	supp->num_params = num_params;
+	supp->param = param;
+	supp->req_posted = true;
+
+	/* Let supplicant get the data */
+	complete(&supp->data_to_supp);
+
+	/*
+	 * Wait for supplicant to process and return result, once we've
+	 * returned from wait_for_completion(data_from_supp) we have
+	 * exclusive access again.
+	 */
+	while (wait_for_completion_interruptible(&supp->data_from_supp)) {
+		mutex_lock(&supp->ctx_mutex);
+		interruptable = !supp->ctx;
+		if (interruptable) {
+			/*
+			 * There's no supplicant available and since the
+			 * supp->ctx_mutex currently is held none can
+			 * become available until the mutex released
+			 * again.
+			 *
+			 * Interrupting an RPC to supplicant is only
+			 * allowed as a way of slightly improving the user
+			 * experience in case the supplicant hasn't been
+			 * started yet. During normal operation the supplicant
+			 * will serve all requests in a timely manner and
+			 * interrupting then wouldn't make sense.
+			 */
+			supp->ret = TEEC_ERROR_COMMUNICATION;
+			/* Reset so the stale request isn't picked up later */
+			init_completion(&supp->data_to_supp);
+		}
+		mutex_unlock(&supp->ctx_mutex);
+		if (interruptable)
+			break;
+	}
+
+	ret = supp->ret;
+	supp->param = NULL;
+	supp->req_posted = false;
+
+	/* We're done, let someone else talk to the supplicant now. */
+	mutex_unlock(&supp->thrd_mutex);
+
+	return ret;
+}
+
+/**
+ * optee_supp_recv() - receive request for supplicant
+ * @ctx:	context receiving the request
+ * @func:	requested function in supplicant
+ * @num_params:	number of elements allocated in @param, updated with number
+ *		used elements
+ * @param:	space for parameters for @func
+ *
+ * Returns 0 on success or <0 on failure
+ */
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+		    struct tee_param *param)
+{
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+	struct optee_supp *supp = &optee->supp;
+	int rc;
+
+	/*
+	 * In case two threads in one supplicant is calling this function
+	 * simultaneously we need to protect the data with a mutex which
+	 * we'll release before returning.
+	 */
+	mutex_lock(&supp->supp_mutex);
+
+	if (supp->supp_next_send) {
+		/*
+		 * optee_supp_recv() has been called again without
+		 * a optee_supp_send() in between. Supplicant has
+		 * probably been restarted before it was able to
+		 * write back last result. Abort last request and
+		 * wait for a new.
+		 */
+		if (supp->req_posted) {
+			supp->ret = TEEC_ERROR_COMMUNICATION;
+			supp->supp_next_send = false;
+			complete(&supp->data_from_supp);
+		}
+	}
+
+	/*
+	 * This is where supplicant will be hanging most of the
+	 * time, let's make this interruptible so we can easily
+	 * restart supplicant if needed.
+	 */
+	if (wait_for_completion_interruptible(&supp->data_to_supp)) {
+		rc = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* We have exclusive access to the data */
+
+	if (*num_params < supp->num_params) {
+		/*
+		 * Not enough room for parameters, tell supplicant
+		 * it failed and abort last request.
+		 */
+		supp->ret = TEEC_ERROR_COMMUNICATION;
+		rc = -EINVAL;
+		complete(&supp->data_from_supp);
+		goto out;
+	}
+
+	*func = supp->func;
+	*num_params = supp->num_params;
+	memcpy(param, supp->param,
+	       sizeof(struct tee_param) * supp->num_params);
+
+	/* Allow optee_supp_send() below to do its work */
+	supp->supp_next_send = true;
+
+	rc = 0;
+out:
+	mutex_unlock(&supp->supp_mutex);
+	return rc;
+}
+
+/**
+ * optee_supp_send() - send result of request from supplicant
+ * @ctx:	context sending result
+ * @ret:	return value of request
+ * @num_params:	number of parameters returned
+ * @param:	returned parameters
+ *
+ * Returns 0 on success or <0 on failure.
+ */
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+		    struct tee_param *param)
+{
+	struct tee_device *teedev = ctx->teedev;
+	struct optee *optee = tee_get_drvdata(teedev);
+	struct optee_supp *supp = &optee->supp;
+	size_t n;
+	int rc = 0;
+
+	/*
+	 * We still have exclusive access to the data since that's how we
+	 * left it when returning from optee_supp_recv().
+	 */
+
+	/* See comment on mutex in optee_supp_recv() above */
+	mutex_lock(&supp->supp_mutex);
+
+	if (!supp->supp_next_send) {
+		/*
+		 * Something strange is going on, supplicant shouldn't
+		 * enter optee_supp_send() in this state
+		 */
+		rc = -ENOENT;
+		goto out;
+	}
+
+	if (num_params != supp->num_params) {
+		/*
+		 * Something is wrong, let supplicant restart. Next call to
+		 * optee_supp_recv() will give an error to the requesting
+		 * thread and release it.
+		 */
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Update out and in/out parameters */
+	for (n = 0; n < num_params; n++) {
+		struct tee_param *p = supp->param + n;
+
+		switch (p->attr) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			p->u.value.a = param[n].u.value.a;
+			p->u.value.b = param[n].u.value.b;
+			p->u.value.c = param[n].u.value.c;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			p->u.memref.size = param[n].u.memref.size;
+			break;
+		default:
+			/* Input-only parameters are not written back */
+			break;
+		}
+	}
+	supp->ret = ret;
+
+	/* Allow optee_supp_recv() above to do its work */
+	supp->supp_next_send = false;
+
+	/* Let the requesting thread continue */
+	complete(&supp->data_from_supp);
+out:
+	mutex_unlock(&supp->supp_mutex);
+	return rc;
+}
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
new file mode 100644
index 000000000000..5c60bf4423e6
--- /dev/null
+++ b/drivers/tee/tee_core.c
@@ -0,0 +1,893 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
+#include "tee_private.h"
+
+#define TEE_NUM_DEVICES 32
+
+#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+
+/*
+ * Unprivileged devices in the lower half range and privileged devices in
+ * the upper half range.
+ */
+static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
+static DEFINE_SPINLOCK(driver_lock);
+
+static struct class *tee_class;
+static dev_t tee_devt;
+
+/*
+ * file_operations::open for /dev/tee* and /dev/teepriv*.
+ *
+ * Takes a reference on the tee_device, allocates a per-fd tee_context and
+ * hands it to the driver's open() callback. Both are released on error.
+ */
+static int tee_open(struct inode *inode, struct file *filp)
+{
+	int rc;
+	struct tee_device *teedev;
+	struct tee_context *ctx;
+
+	teedev = container_of(inode->i_cdev, struct tee_device, cdev);
+	if (!tee_device_get(teedev))
+		return -EINVAL;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	ctx->teedev = teedev;
+	INIT_LIST_HEAD(&ctx->list_shm);
+	filp->private_data = ctx;
+	rc = teedev->desc->ops->open(ctx);
+	if (rc)
+		goto err;
+
+	return 0;
+err:
+	kfree(ctx);
+	tee_device_put(teedev);
+	return rc;
+}
+
+/*
+ * file_operations::release counterpart to tee_open().
+ *
+ * Shared memory objects may outlive the context (dma-buf fds still open),
+ * so their back-pointers are cleared rather than the objects freed here.
+ */
+static int tee_release(struct inode *inode, struct file *filp)
+{
+	struct tee_context *ctx = filp->private_data;
+	struct tee_device *teedev = ctx->teedev;
+	struct tee_shm *shm;
+
+	ctx->teedev->desc->ops->release(ctx);
+	mutex_lock(&ctx->teedev->mutex);
+	list_for_each_entry(shm, &ctx->list_shm, link)
+		shm->ctx = NULL;
+	mutex_unlock(&ctx->teedev->mutex);
+	kfree(ctx);
+	tee_device_put(teedev);
+	return 0;
+}
+
+/* TEE_IOC_VERSION: copy the driver-reported version data to user space. */
+static int tee_ioctl_version(struct tee_context *ctx,
+			     struct tee_ioctl_version_data __user *uvers)
+{
+	struct tee_ioctl_version_data vers;
+
+	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
+	if (copy_to_user(uvers, &vers, sizeof(vers)))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * TEE_IOC_SHM_ALLOC: allocate shared memory and return a dma-buf fd for it.
+ *
+ * On success the return value is the new file descriptor; id/flags/size
+ * are written back into @udata.
+ */
+static int tee_ioctl_shm_alloc(struct tee_context *ctx,
+			       struct tee_ioctl_shm_alloc_data __user *udata)
+{
+	long ret;
+	struct tee_ioctl_shm_alloc_data data;
+	struct tee_shm *shm;
+
+	if (copy_from_user(&data, udata, sizeof(data)))
+		return -EFAULT;
+
+	/* Currently no input flags are supported */
+	if (data.flags)
+		return -EINVAL;
+
+	data.id = -1;
+
+	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	data.id = shm->id;
+	data.flags = shm->flags;
+	data.size = shm->size;
+
+	if (copy_to_user(udata, &data, sizeof(data)))
+		ret = -EFAULT;
+	else
+		ret = tee_shm_get_fd(shm);
+
+	/*
+	 * When user space closes the file descriptor the shared memory
+	 * should be freed or if tee_shm_get_fd() failed then it will
+	 * be freed immediately.
+	 */
+	tee_shm_put(shm);
+	return ret;
+}
+
+/*
+ * Convert an array of tee_ioctl_param from user space into kernel
+ * struct tee_param. Memref entries resolve the shm id (field c) into a
+ * referenced struct tee_shm pointer.
+ */
+static int params_from_user(struct tee_context *ctx, struct tee_param *params,
+			    size_t num_params,
+			    struct tee_ioctl_param __user *uparams)
+{
+	size_t n;
+
+	for (n = 0; n < num_params; n++) {
+		struct tee_shm *shm;
+		struct tee_ioctl_param ip;
+
+		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+			return -EFAULT;
+
+		/* All unused attribute bits has to be zero */
+		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+			return -EINVAL;
+
+		params[n].attr = ip.attr;
+		switch (ip.attr) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			params[n].u.value.a = ip.a;
+			params[n].u.value.b = ip.b;
+			params[n].u.value.c = ip.c;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			/*
+			 * If we fail to get a pointer to a shared memory
+			 * object (and increase the ref count) from an
+			 * identifier we return an error. All pointers that
+			 * has been added in params have an increased ref
+			 * count. It's the callers responsibility to do
+			 * tee_shm_put() on all resolved pointers.
+			 */
+			shm = tee_shm_get_from_id(ctx, ip.c);
+			if (IS_ERR(shm))
+				return PTR_ERR(shm);
+
+			params[n].u.memref.shm_offs = ip.a;
+			params[n].u.memref.size = ip.b;
+			params[n].u.memref.shm = shm;
+			break;
+		default:
+			/* Unknown attribute */
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Copy output and in/out parameter values back to the user-space param
+ * array; input-only and NONE parameters are left untouched.
+ */
+static int params_to_user(struct tee_ioctl_param __user *uparams,
+			  size_t num_params, struct tee_param *params)
+{
+	size_t n;
+
+	for (n = 0; n < num_params; n++) {
+		struct tee_ioctl_param __user *up = uparams + n;
+		struct tee_param *p = params + n;
+
+		switch (p->attr) {
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+			if (put_user(p->u.value.a, &up->a) ||
+			    put_user(p->u.value.b, &up->b) ||
+			    put_user(p->u.value.c, &up->c))
+				return -EFAULT;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+			if (put_user((u64)p->u.memref.size, &up->b))
+				return -EFAULT;
+			/* break added: the implicit fallthrough was accidental */
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+/* True when the parameter is any of the three memref attribute types. */
+static bool param_is_memref(struct tee_param *param)
+{
+	switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * TEE_IOC_OPEN_SESSION: open a session towards a trusted application.
+ *
+ * The argument buffer holds a tee_ioctl_open_session_arg immediately
+ * followed by its parameter array; the total length must match exactly.
+ * If the session opens but results can't be copied back, the session is
+ * closed again so nothing leaks.
+ */
+static int tee_ioctl_open_session(struct tee_context *ctx,
+				  struct tee_ioctl_buf_data __user *ubuf)
+{
+	int rc;
+	size_t n;
+	struct tee_ioctl_buf_data buf;
+	struct tee_ioctl_open_session_arg __user *uarg;
+	struct tee_ioctl_open_session_arg arg;
+	struct tee_ioctl_param __user *uparams = NULL;
+	struct tee_param *params = NULL;
+	bool have_session = false;
+
+	if (!ctx->teedev->desc->ops->open_session)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, sizeof(buf)))
+		return -EFAULT;
+
+	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
+		return -EINVAL;
+
+	uarg = u64_to_user_ptr(buf.buf_ptr);
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	/* Length must exactly cover header plus declared parameters */
+	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+		return -EINVAL;
+
+	if (arg.num_params) {
+		params = kcalloc(arg.num_params, sizeof(struct tee_param),
+				 GFP_KERNEL);
+		if (!params)
+			return -ENOMEM;
+		uparams = uarg->params;
+		rc = params_from_user(ctx, params, arg.num_params, uparams);
+		if (rc)
+			goto out;
+	}
+
+	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
+	if (rc)
+		goto out;
+	have_session = true;
+
+	if (put_user(arg.session, &uarg->session) ||
+	    put_user(arg.ret, &uarg->ret) ||
+	    put_user(arg.ret_origin, &uarg->ret_origin)) {
+		rc = -EFAULT;
+		goto out;
+	}
+	rc = params_to_user(uparams, arg.num_params, params);
+out:
+	/*
+	 * If we've succeeded to open the session but failed to communicate
+	 * it back to user space, close the session again to avoid leakage.
+	 */
+	if (rc && have_session && ctx->teedev->desc->ops->close_session)
+		ctx->teedev->desc->ops->close_session(ctx, arg.session);
+
+	if (params) {
+		/* Decrease ref count for all valid shared memory pointers */
+		for (n = 0; n < arg.num_params; n++)
+			if (param_is_memref(params + n) &&
+			    params[n].u.memref.shm)
+				tee_shm_put(params[n].u.memref.shm);
+		kfree(params);
+	}
+
+	return rc;
+}
+
+/*
+ * TEE_IOC_INVOKE: invoke a function in an open session.
+ *
+ * Buffer layout and parameter handling mirror tee_ioctl_open_session();
+ * memref references taken by params_from_user() are dropped on all paths.
+ */
+static int tee_ioctl_invoke(struct tee_context *ctx,
+			    struct tee_ioctl_buf_data __user *ubuf)
+{
+	int rc;
+	size_t n;
+	struct tee_ioctl_buf_data buf;
+	struct tee_ioctl_invoke_arg __user *uarg;
+	struct tee_ioctl_invoke_arg arg;
+	struct tee_ioctl_param __user *uparams = NULL;
+	struct tee_param *params = NULL;
+
+	if (!ctx->teedev->desc->ops->invoke_func)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, sizeof(buf)))
+		return -EFAULT;
+
+	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
+		return -EINVAL;
+
+	uarg = u64_to_user_ptr(buf.buf_ptr);
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+		return -EINVAL;
+
+	if (arg.num_params) {
+		params = kcalloc(arg.num_params, sizeof(struct tee_param),
+				 GFP_KERNEL);
+		if (!params)
+			return -ENOMEM;
+		uparams = uarg->params;
+		rc = params_from_user(ctx, params, arg.num_params, uparams);
+		if (rc)
+			goto out;
+	}
+
+	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
+	if (rc)
+		goto out;
+
+	if (put_user(arg.ret, &uarg->ret) ||
+	    put_user(arg.ret_origin, &uarg->ret_origin)) {
+		rc = -EFAULT;
+		goto out;
+	}
+	rc = params_to_user(uparams, arg.num_params, params);
+out:
+	if (params) {
+		/* Decrease ref count for all valid shared memory pointers */
+		for (n = 0; n < arg.num_params; n++)
+			if (param_is_memref(params + n) &&
+			    params[n].u.memref.shm)
+				tee_shm_put(params[n].u.memref.shm);
+		kfree(params);
+	}
+	return rc;
+}
+
+/* TEE_IOC_CANCEL: forward a cancellation request to the driver. */
+static int tee_ioctl_cancel(struct tee_context *ctx,
+			    struct tee_ioctl_cancel_arg __user *uarg)
+{
+	struct tee_ioctl_cancel_arg arg;
+
+	if (!ctx->teedev->desc->ops->cancel_req)
+		return -EINVAL;
+
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
+						  arg.session);
+}
+
+/* TEE_IOC_CLOSE_SESSION: close a previously opened session. */
+static int
+tee_ioctl_close_session(struct tee_context *ctx,
+			struct tee_ioctl_close_session_arg __user *uarg)
+{
+	struct tee_ioctl_close_session_arg arg;
+
+	if (!ctx->teedev->desc->ops->close_session)
+		return -EINVAL;
+
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
+}
+
+/*
+ * params_to_supp() - copy request parameters out to the user-space
+ * supplicant, translating each kernel struct tee_param into the ABI
+ * struct tee_ioctl_param. For memrefs with no backing shm, ip.c is set
+ * to (u64)-1 as an "invalid shm id" marker.
+ */
+static int params_to_supp(struct tee_context *ctx,
+ struct tee_ioctl_param __user *uparams,
+ size_t num_params, struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_ioctl_param ip;
+ struct tee_param *p = params + n;
+
+ ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ ip.a = p->u.value.a;
+ ip.b = p->u.value.b;
+ ip.c = p->u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ ip.b = p->u.memref.size;
+ if (!p->u.memref.shm) {
+ ip.a = 0;
+ ip.c = (u64)-1; /* invalid shm id */
+ break;
+ }
+ ip.a = p->u.memref.shm_offs;
+ ip.c = p->u.memref.shm->id;
+ break;
+ default:
+ /* ip is stack-local: zero all three fields so
+ * copy_to_user() below can't leak stack data.
+ */
+ ip.a = 0;
+ ip.b = 0;
+ ip.c = 0;
+ break;
+ }
+
+ if (copy_to_user(uparams + n, &ip, sizeof(ip)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * TEE_IOC_SUPPL_RECV handler: let the supplicant receive a request
+ * (function id + parameters) queued by the driver.
+ */
+static int tee_ioctl_supp_recv(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ struct tee_ioctl_buf_data buf;
+ struct tee_iocl_supp_recv_arg __user *uarg;
+ struct tee_param *params;
+ u32 num_params;
+ u32 func;
+
+ if (!ctx->teedev->desc->ops->supp_recv)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
+ return -EFAULT;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (get_user(num_params, &uarg->num_params))
+ return -EFAULT;
+
+ /* The user buffer must be sized exactly for num_params entries */
+ if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
+ return -EINVAL;
+
+ params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
+ if (rc)
+ goto out;
+
+ if (put_user(func, &uarg->func) ||
+ put_user(num_params, &uarg->num_params)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * NOTE(review): assumes the driver's supp_recv() never increases
+ * num_params beyond the value read from user space above, since
+ * uarg->params was sized for the original count -- confirm the
+ * driver-side contract.
+ */
+ rc = params_to_supp(ctx, uarg->params, num_params, params);
+out:
+ kfree(params);
+ return rc;
+}
+
+/*
+ * params_from_supp() - read updated parameters back from the supplicant.
+ *
+ * Only output-capable value fields and memref sizes are taken from user
+ * space; memref shm pointers/offsets are never trusted from the
+ * supplicant and are cleared here.
+ */
+static int params_from_supp(struct tee_param *params, size_t num_params,
+ struct tee_ioctl_param __user *uparams)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ struct tee_ioctl_param ip;
+
+ if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+ return -EFAULT;
+
+ /* All unused attribute bits have to be zero */
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+ return -EINVAL;
+
+ p->attr = ip.attr;
+ switch (ip.attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ /* Only out and in/out values can be updated */
+ p->u.value.a = ip.a;
+ p->u.value.b = ip.b;
+ p->u.value.c = ip.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ /*
+ * Only the size of the memref can be updated.
+ * Since we don't have access to the original
+ * parameters here, only store the supplied size.
+ * The driver will copy the updated size into the
+ * original parameters.
+ */
+ p->u.memref.shm = NULL;
+ p->u.memref.shm_offs = 0;
+ p->u.memref.size = ip.b;
+ break;
+ default:
+ /* Input-only or unknown: nothing to take back */
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * TEE_IOC_SUPPL_SEND handler: let the supplicant send back the result
+ * (return code + updated parameters) of a previously received request.
+ */
+static int tee_ioctl_supp_send(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ long rc;
+ struct tee_ioctl_buf_data buf;
+ struct tee_iocl_supp_send_arg __user *uarg;
+ struct tee_param *params;
+ u32 num_params;
+ u32 ret;
+
+ /* Not valid for this driver */
+ if (!ctx->teedev->desc->ops->supp_send)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (get_user(ret, &uarg->ret) ||
+ get_user(num_params, &uarg->num_params))
+ return -EFAULT;
+
+ /*
+ * NOTE(review): on 32-bit, sizeof(*uarg) +
+ * TEE_IOCTL_PARAM_SIZE(num_params) could in theory wrap for a huge
+ * num_params and defeat this bound check -- confirm num_params is
+ * otherwise bounded.
+ */
+ if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
+ return -EINVAL;
+
+ params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ rc = params_from_supp(params, num_params, uarg->params);
+ if (rc)
+ goto out;
+
+ rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
+out:
+ kfree(params);
+ return rc;
+}
+
+/* Top-level ioctl dispatcher for the TEE character devices */
+static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct tee_context *ctx = filp->private_data;
+ void __user *uarg = (void __user *)arg;
+
+ switch (cmd) {
+ case TEE_IOC_VERSION:
+ return tee_ioctl_version(ctx, uarg);
+ case TEE_IOC_SHM_ALLOC:
+ return tee_ioctl_shm_alloc(ctx, uarg);
+ case TEE_IOC_OPEN_SESSION:
+ return tee_ioctl_open_session(ctx, uarg);
+ case TEE_IOC_INVOKE:
+ return tee_ioctl_invoke(ctx, uarg);
+ case TEE_IOC_CANCEL:
+ return tee_ioctl_cancel(ctx, uarg);
+ case TEE_IOC_CLOSE_SESSION:
+ return tee_ioctl_close_session(ctx, uarg);
+ case TEE_IOC_SUPPL_RECV:
+ return tee_ioctl_supp_recv(ctx, uarg);
+ case TEE_IOC_SUPPL_SEND:
+ return tee_ioctl_supp_send(ctx, uarg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * compat_ioctl can reuse tee_ioctl unchanged: the argument structs
+ * carry user pointers as fixed-size u64 fields (see u64_to_user_ptr()
+ * above), so no 32/64-bit translation is needed.
+ */
+static const struct file_operations tee_fops = {
+ .owner = THIS_MODULE,
+ .open = tee_open,
+ .release = tee_release,
+ .unlocked_ioctl = tee_ioctl,
+ .compat_ioctl = tee_ioctl,
+};
+
+/*
+ * Device release callback, invoked when the last reference to
+ * teedev->dev is dropped: give back the device id bit and free the
+ * tee_device allocated in tee_device_alloc().
+ */
+static void tee_release_device(struct device *dev)
+{
+ struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+
+ spin_lock(&driver_lock);
+ clear_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+ mutex_destroy(&teedev->mutex);
+ idr_destroy(&teedev->idr);
+ kfree(teedev);
+}
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data)
+{
+ struct tee_device *teedev;
+ void *ret;
+ int rc;
+ int offs = 0;
+
+ if (!teedesc || !teedesc->name || !teedesc->ops ||
+ !teedesc->ops->get_version || !teedesc->ops->open ||
+ !teedesc->ops->release || !pool)
+ return ERR_PTR(-EINVAL);
+
+ teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
+ if (!teedev) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ /* Privileged (supplicant) devices use the upper half of the id space */
+ if (teedesc->flags & TEE_DESC_PRIVILEGED)
+ offs = TEE_NUM_DEVICES / 2;
+
+ spin_lock(&driver_lock);
+ teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs);
+ if (teedev->id < TEE_NUM_DEVICES)
+ set_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+
+ if (teedev->id >= TEE_NUM_DEVICES) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ /* Yields names of the form tee0.. / teepriv0.. */
+ snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
+ teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
+ teedev->id - offs);
+
+ teedev->dev.class = tee_class;
+ teedev->dev.release = tee_release_device;
+ teedev->dev.parent = dev;
+
+ teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
+
+ rc = dev_set_name(&teedev->dev, "%s", teedev->name);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err_devt;
+ }
+
+ cdev_init(&teedev->cdev, &tee_fops);
+ teedev->cdev.owner = teedesc->owner;
+ teedev->cdev.kobj.parent = &teedev->dev.kobj;
+
+ dev_set_drvdata(&teedev->dev, driver_data);
+ device_initialize(&teedev->dev);
+
+ /* 1 as tee_device_unregister() does one final tee_device_put() */
+ teedev->num_users = 1;
+ init_completion(&teedev->c_no_users);
+ mutex_init(&teedev->mutex);
+ idr_init(&teedev->idr);
+
+ teedev->desc = teedesc;
+ teedev->pool = pool;
+
+ return teedev;
+err_devt:
+ /*
+ * NOTE(review): the chrdev region is allocated once for all
+ * TEE_NUM_DEVICES minors in tee_init(); unregistering a single
+ * minor here looks suspicious -- confirm this error path.
+ */
+ unregister_chrdev_region(teedev->dev.devt, 1);
+err:
+ pr_err("could not register %s driver\n",
+ teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
+ if (teedev && teedev->id < TEE_NUM_DEVICES) {
+ spin_lock(&driver_lock);
+ clear_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+ }
+ kfree(teedev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tee_device_alloc);
+
+/* sysfs "implementation_id" attribute: exposes the driver's impl_id */
+static ssize_t implementation_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+ struct tee_ioctl_version_data vers;
+
+ teedev->desc->ops->get_version(teedev, &vers);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
+}
+static DEVICE_ATTR_RO(implementation_id);
+
+static struct attribute *tee_dev_attrs[] = {
+ &dev_attr_implementation_id.attr,
+ NULL
+};
+
+static const struct attribute_group tee_dev_group = {
+ .attrs = tee_dev_attrs,
+};
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() need to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev)
+{
+ int rc;
+
+ if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
+ dev_err(&teedev->dev, "attempt to register twice\n");
+ return -EINVAL;
+ }
+
+ rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
+ if (rc) {
+ dev_err(&teedev->dev,
+ "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+ teedev->name, MAJOR(teedev->dev.devt),
+ MINOR(teedev->dev.devt), rc);
+ return rc;
+ }
+
+ rc = device_add(&teedev->dev);
+ if (rc) {
+ dev_err(&teedev->dev,
+ "unable to device_add() %s, major %d, minor %d, err=%d\n",
+ teedev->name, MAJOR(teedev->dev.devt),
+ MINOR(teedev->dev.devt), rc);
+ goto err_device_add;
+ }
+
+ rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
+ if (rc) {
+ dev_err(&teedev->dev,
+ "failed to create sysfs attributes, err=%d\n", rc);
+ goto err_sysfs_create_group;
+ }
+
+ teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
+ return 0;
+
+ /* Tear down in reverse order of setup */
+err_sysfs_create_group:
+ device_del(&teedev->dev);
+err_device_add:
+ cdev_del(&teedev->cdev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_device_register);
+
+/*
+ * tee_device_put() - Drop one user reference on @teedev.
+ *
+ * When the last user goes away, @desc is cleared (blocking new
+ * tee_device_get() calls) and c_no_users is completed so
+ * tee_device_unregister() can proceed.
+ */
+void tee_device_put(struct tee_device *teedev)
+{
+ mutex_lock(&teedev->mutex);
+ /* Shouldn't put in this state */
+ if (!WARN_ON(!teedev->desc)) {
+ teedev->num_users--;
+ if (!teedev->num_users) {
+ teedev->desc = NULL;
+ complete(&teedev->c_no_users);
+ }
+ }
+ mutex_unlock(&teedev->mutex);
+}
+
+/*
+ * tee_device_get() - Take one user reference on @teedev.
+ *
+ * Returns false if the device is being unregistered (desc == NULL).
+ */
+bool tee_device_get(struct tee_device *teedev)
+{
+ mutex_lock(&teedev->mutex);
+ if (!teedev->desc) {
+ mutex_unlock(&teedev->mutex);
+ return false;
+ }
+ teedev->num_users++;
+ mutex_unlock(&teedev->mutex);
+ return true;
+}
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev)
+{
+ if (!teedev)
+ return;
+
+ if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
+ sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
+ cdev_del(&teedev->cdev);
+ device_del(&teedev->dev);
+ }
+
+ /* Drops the initial reference and waits for all users to finish */
+ tee_device_put(teedev);
+ wait_for_completion(&teedev->c_no_users);
+
+ /*
+ * No need to take a mutex any longer now since teedev->desc was
+ * set to NULL before teedev->c_no_users was completed.
+ */
+
+ teedev->pool = NULL;
+
+ put_device(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_device_unregister);
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @teedev: Device containing the driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev)
+{
+ return dev_get_drvdata(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_get_drvdata);
+
+/*
+ * Subsystem init: create the "tee" device class and reserve one chrdev
+ * region covering all TEE_NUM_DEVICES minors.
+ */
+static int __init tee_init(void)
+{
+ int rc;
+
+ tee_class = class_create(THIS_MODULE, "tee");
+ if (IS_ERR(tee_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(tee_class);
+ }
+
+ rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
+ if (rc) {
+ pr_err("failed to allocate char dev region\n");
+ class_destroy(tee_class);
+ tee_class = NULL;
+ }
+
+ return rc;
+}
+
+/* Subsystem exit: undo tee_init() */
+static void __exit tee_exit(void)
+{
+ class_destroy(tee_class);
+ tee_class = NULL;
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+}
+
+subsys_initcall(tee_init);
+module_exit(tee_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("TEE Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
new file mode 100644
index 000000000000..21cb6be8bce9
--- /dev/null
+++ b/drivers/tee/tee_private.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef TEE_PRIVATE_H
+#define TEE_PRIVATE_H
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct tee_device;
+
+/**
+ * struct tee_shm - shared memory object
+ * @teedev: device used to allocate the object
+ * @ctx: context using the object, if NULL the context is gone
+ * @link: list element in the owning context's list_shm
+ * @paddr: physical address of the shared memory
+ * @kaddr: virtual address of the shared memory
+ * @size: size of shared memory
+ * @dmabuf: dmabuf used to for exporting to user space
+ * @flags: defined by TEE_SHM_* in tee_drv.h
+ * @id: unique id of a shared memory object on this device
+ */
+struct tee_shm {
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+ struct list_head link;
+ phys_addr_t paddr;
+ void *kaddr;
+ size_t size;
+ struct dma_buf *dmabuf;
+ u32 flags;
+ int id;
+};
+
+struct tee_shm_pool_mgr;
+
+/**
+ * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * @alloc: called when allocating shared memory
+ * @free: called when freeing shared memory
+ */
+struct tee_shm_pool_mgr_ops {
+ int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
+ size_t size);
+ void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
+};
+
+/**
+ * struct tee_shm_pool_mgr - shared memory manager
+ * @ops: operations
+ * @private_data: private data for the shared memory manager
+ */
+struct tee_shm_pool_mgr {
+ const struct tee_shm_pool_mgr_ops *ops;
+ void *private_data;
+};
+
+/**
+ * struct tee_shm_pool - shared memory pool
+ * @private_mgr: pool manager for shared memory only between kernel
+ * and secure world
+ * @dma_buf_mgr: pool manager for shared memory exported to user space
+ * @destroy: called when destroying the pool
+ * @private_data: private data for the pool
+ */
+struct tee_shm_pool {
+ struct tee_shm_pool_mgr private_mgr;
+ struct tee_shm_pool_mgr dma_buf_mgr;
+ void (*destroy)(struct tee_shm_pool *pool);
+ void *private_data;
+};
+
+#define TEE_DEVICE_FLAG_REGISTERED 0x1
+#define TEE_MAX_DEV_NAME_LEN 32
+
+/**
+ * struct tee_device - TEE Device representation
+ * @name: name of device
+ * @desc: description of device
+ * @id: unique id of device
+ * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
+ * @dev: embedded basic device structure
+ * @cdev: embedded cdev
+ * @num_users: number of active users of this device
+ * @c_no_users: completion used when unregistering the device
+ * @mutex: mutex protecting @num_users and @idr
+ * @idr: register of shared memory object allocated on this device
+ * @pool: shared memory pool
+ */
+struct tee_device {
+ char name[TEE_MAX_DEV_NAME_LEN];
+ const struct tee_desc *desc;
+ int id;
+ unsigned int flags;
+
+ struct device dev;
+ struct cdev cdev;
+
+ size_t num_users;
+ struct completion c_no_users;
+ struct mutex mutex; /* protects num_users and idr */
+
+ struct idr idr;
+ struct tee_shm_pool *pool;
+};
+
+int tee_shm_init(void);
+
+int tee_shm_get_fd(struct tee_shm *shm);
+
+bool tee_device_get(struct tee_device *teedev);
+void tee_device_put(struct tee_device *teedev);
+
+#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
new file mode 100644
index 000000000000..0be1e3e93bee
--- /dev/null
+++ b/drivers/tee/tee_shm.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/fdtable.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "tee_private.h"
+
+/*
+ * Final teardown of a shared memory object: remove it from the device
+ * idr and the owning context's list, return the memory to the pool
+ * manager it came from, and drop the device reference taken in
+ * tee_shm_alloc().
+ */
+static void tee_shm_release(struct tee_shm *shm)
+{
+ struct tee_device *teedev = shm->teedev;
+ struct tee_shm_pool_mgr *poolm;
+
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ if (shm->ctx)
+ list_del(&shm->link);
+ mutex_unlock(&teedev->mutex);
+
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ poolm = &teedev->pool->dma_buf_mgr;
+ else
+ poolm = &teedev->pool->private_mgr;
+
+ poolm->ops->free(poolm, shm);
+ kfree(shm);
+
+ tee_device_put(teedev);
+}
+
+/* dma-buf attachment mapping is not supported; only mmap/release are */
+static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
+ *attach, enum dma_data_direction dir)
+{
+ return NULL;
+}
+
+static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *table,
+ enum dma_data_direction dir)
+{
+}
+
+/* Last dma-buf reference gone: tear down the shm object itself */
+static void tee_shm_op_release(struct dma_buf *dmabuf)
+{
+ struct tee_shm *shm = dmabuf->priv;
+
+ tee_shm_release(shm);
+}
+
+/* Kernel mapping through dma-buf is not supported */
+static void *tee_shm_op_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+ return NULL;
+}
+
+static void *tee_shm_op_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+ return NULL;
+}
+
+/* Map the whole physically contiguous buffer into user space */
+static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct tee_shm *shm = dmabuf->priv;
+ size_t size = vma->vm_end - vma->vm_start;
+
+ return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static struct dma_buf_ops tee_shm_dma_buf_ops = {
+ .map_dma_buf = tee_shm_op_map_dma_buf,
+ .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
+ .release = tee_shm_op_release,
+ .kmap_atomic = tee_shm_op_kmap_atomic,
+ .kmap = tee_shm_op_kmap,
+ .mmap = tee_shm_op_mmap,
+};
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ *
+ * @returns a pointer to 'struct tee_shm' or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm_pool_mgr *poolm = NULL;
+ struct tee_shm *shm;
+ void *ret;
+ int rc;
+
+ if (!(flags & TEE_SHM_MAPPED)) {
+ dev_err(teedev->dev.parent,
+ "only mapped allocations supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
+ dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Hold a device reference for the lifetime of the shm object */
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ if (!teedev->pool) {
+ /* teedev has been detached from driver */
+ ret = ERR_PTR(-EINVAL);
+ goto err_dev_put;
+ }
+
+ shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+ if (!shm) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_dev_put;
+ }
+
+ shm->flags = flags;
+ shm->teedev = teedev;
+ shm->ctx = ctx;
+ if (flags & TEE_SHM_DMA_BUF)
+ poolm = &teedev->pool->dma_buf_mgr;
+ else
+ poolm = &teedev->pool->private_mgr;
+
+ rc = poolm->ops->alloc(poolm, shm, size);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err_kfree;
+ }
+
+ /* Assign an id (>= 1) under the mutex protecting the idr */
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (shm->id < 0) {
+ ret = ERR_PTR(shm->id);
+ goto err_pool_free;
+ }
+
+ if (flags & TEE_SHM_DMA_BUF) {
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tee_shm_dma_buf_ops;
+ exp_info.size = shm->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = shm;
+
+ shm->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(shm->dmabuf)) {
+ ret = ERR_CAST(shm->dmabuf);
+ goto err_rem;
+ }
+ }
+ mutex_lock(&teedev->mutex);
+ list_add_tail(&shm->link, &ctx->list_shm);
+ mutex_unlock(&teedev->mutex);
+
+ return shm;
+err_rem:
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+err_pool_free:
+ poolm->ops->free(poolm, shm);
+err_kfree:
+ kfree(shm);
+err_dev_put:
+ tee_device_put(teedev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc);
+
+/**
+ * tee_shm_get_fd() - Increase reference count and return file descriptor
+ * @shm: Shared memory handle
+ * @returns user space file descriptor to shared memory
+ */
+int tee_shm_get_fd(struct tee_shm *shm)
+{
+ u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
+ int fd;
+
+ if ((shm->flags & req_flags) != req_flags)
+ return -EINVAL;
+
+ /*
+ * Take the extra reference *before* the fd is installed: once
+ * dma_buf_fd() has published the fd, a racing close() from user
+ * space could drop the last reference and free shm under us.
+ */
+ get_dma_buf(shm->dmabuf);
+ fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(shm->dmabuf);
+ return fd;
+}
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm: Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm)
+{
+ /*
+ * dma_buf_put() decreases the dmabuf reference counter and will
+ * call tee_shm_release() when the last reference is gone.
+ *
+ * In the case of driver private memory we call tee_shm_release
+ * directly instead as it doesn't have a reference counter.
+ */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ dma_buf_put(shm->dmabuf);
+ else
+ tee_shm_release(shm);
+}
+EXPORT_SYMBOL_GPL(tee_shm_free);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm: Shared memory handle
+ * @va: Virtual address to translate
+ * @pa: Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
+{
+ /* Check that we're in the range of the shm */
+ if ((char *)va < (char *)shm->kaddr)
+ return -EINVAL;
+ if ((char *)va >= ((char *)shm->kaddr + shm->size))
+ return -EINVAL;
+
+ return tee_shm_get_pa(
+ shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
+}
+EXPORT_SYMBOL_GPL(tee_shm_va2pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm: Shared memory handle
+ * @pa: Physical address to translate
+ * @va: Returned virtual address, may be NULL for a pure range check
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
+{
+ /* Check that we're in the range of the shm */
+ if (pa < shm->paddr)
+ return -EINVAL;
+ if (pa >= (shm->paddr + shm->size))
+ return -EINVAL;
+
+ if (va) {
+ void *v = tee_shm_get_va(shm, pa - shm->paddr);
+
+ if (IS_ERR(v))
+ return PTR_ERR(v);
+ *va = v;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pa2va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ * the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
+{
+ if (offs >= shm->size)
+ return ERR_PTR(-EINVAL);
+ return (char *)shm->kaddr + offs;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_va);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @pa: Physical address to return, may be NULL for a pure range check
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ * error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
+{
+ if (offs >= shm->size)
+ return -EINVAL;
+ if (pa)
+ *pa = shm->paddr + offs;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_pa);
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
+{
+ struct tee_device *teedev;
+ struct tee_shm *shm;
+
+ if (!ctx)
+ return ERR_PTR(-EINVAL);
+
+ teedev = ctx->teedev;
+ mutex_lock(&teedev->mutex);
+ /* Only hand out objects that belong to the requesting context */
+ shm = idr_find(&teedev->idr, id);
+ if (!shm || shm->ctx != ctx)
+ shm = ERR_PTR(-EINVAL);
+ else if (shm->flags & TEE_SHM_DMA_BUF)
+ get_dma_buf(shm->dmabuf);
+ mutex_unlock(&teedev->mutex);
+ return shm;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+int tee_shm_get_id(struct tee_shm *shm)
+{
+ return shm->id;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_id);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm)
+{
+ /* Only dma-buf backed shm carries a reference counter */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ dma_buf_put(shm->dmabuf);
+}
+EXPORT_SYMBOL_GPL(tee_shm_put);
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
new file mode 100644
index 000000000000..fb4f8522a526
--- /dev/null
+++ b/drivers/tee/tee_shm_pool.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "tee_private.h"
+
+/*
+ * Allocate @size bytes (rounded up to the pool's minimum allocation
+ * order) from the genalloc pool, zero it and fill in shm's kaddr,
+ * paddr and size.
+ */
+static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ unsigned long va;
+ struct gen_pool *genpool = poolm->private_data;
+ size_t s = roundup(size, 1 << genpool->min_alloc_order);
+
+ va = gen_pool_alloc(genpool, s);
+ if (!va)
+ return -ENOMEM;
+
+ /* Zero to avoid leaking stale data to the other side */
+ memset((void *)va, 0, s);
+ shm->kaddr = (void *)va;
+ shm->paddr = gen_pool_virt_to_phys(genpool, va);
+ shm->size = s;
+ return 0;
+}
+
+/* Return an allocation made by pool_op_gen_alloc() to the genalloc pool */
+static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
+ shm->size);
+ shm->kaddr = NULL;
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
+ .alloc = pool_op_gen_alloc,
+ .free = pool_op_gen_free,
+};
+
+/* Destroy both genalloc pools of a reserved-memory shm pool */
+static void pool_res_mem_destroy(struct tee_shm_pool *pool)
+{
+ gen_pool_destroy(pool->private_mgr.private_data);
+ gen_pool_destroy(pool->dma_buf_mgr.private_data);
+}
+
+/*
+ * Initialize one pool manager backed by a genalloc pool over the
+ * page-aligned reserved memory range described by @info.
+ *
+ * On failure the partially created genpool is destroyed here, so the
+ * caller only needs to clean up managers that initialized successfully.
+ */
+static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
+ struct tee_shm_pool_mem_info *info,
+ int min_alloc_order)
+{
+ size_t page_mask = PAGE_SIZE - 1;
+ struct gen_pool *genpool = NULL;
+ int rc;
+
+ /*
+ * Start and end must be page aligned
+ */
+ if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
+ (info->size & page_mask))
+ return -EINVAL;
+
+ genpool = gen_pool_create(min_alloc_order, -1);
+ if (!genpool)
+ return -ENOMEM;
+
+ gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
+ rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
+ -1);
+ if (rc) {
+ gen_pool_destroy(genpool);
+ return rc;
+ }
+
+ mgr->private_data = genpool;
+ mgr->ops = &pool_ops_generic;
+ return 0;
+}
+
+/**
+ * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
+ * memory range
+ * @priv_info: Information for driver private shared memory pool
+ * @dmabuf_info: Information for dma-buf shared memory pool
+ *
+ * Start and end of pools will must be page aligned.
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf, others will use the range provided by @priv.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *
+tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
+ struct tee_shm_pool_mem_info *dmabuf_info)
+{
+ struct tee_shm_pool *pool = NULL;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Create the pool for driver private shared memory
+ */
+ ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
+ 3 /* 8 byte aligned */);
+ if (ret)
+ goto err;
+
+ /*
+ * Create the pool for dma_buf shared memory
+ */
+ ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
+ PAGE_SHIFT);
+ if (ret)
+ goto err;
+
+ pool->destroy = pool_res_mem_destroy;
+ return pool;
+err:
+ if (ret == -ENOMEM)
+ pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
+ /* A failed mgr init destroyed its own genpool; free the other one */
+ if (pool && pool->private_mgr.private_data)
+ gen_pool_destroy(pool->private_mgr.private_data);
+ kfree(pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+ pool->destroy(pool);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_free);
diff --git a/drivers/thermal/msm_lmh_dcvs.c b/drivers/thermal/msm_lmh_dcvs.c
index efe80c24d270..215c37526081 100644
--- a/drivers/thermal/msm_lmh_dcvs.c
+++ b/drivers/thermal/msm_lmh_dcvs.c
@@ -82,6 +82,7 @@ struct msm_lmh_dcvs_hw {
uint32_t affinity;
uint32_t temp_limits[LIMITS_TRIP_MAX];
struct sensor_threshold default_lo, default_hi;
+ struct thermal_cooling_device *cdev;
int irq_num;
void *osm_hw_reg;
void *int_clr_reg;
@@ -377,13 +378,38 @@ int msm_lmh_dcvsh_sw_notify(int cpu)
return 0;
}
+/*
+ * CPU hotplug callback: lazily register the cpufreq cooling device for
+ * the LMH DCVS hardware that owns @cpu, the first time one of its CPUs
+ * comes online. Once hw->cdev is set, further events are no-ops.
+ */
+static int __ref lmh_dcvs_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ uint32_t cpu = (uintptr_t)hcpu;
+ struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+ if (!hw || hw->cdev)
+ return NOTIFY_OK;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ hw->cdev = cpufreq_platform_cooling_register(&hw->core_map,
+ &cd_ops);
+ /* Registration failure: retry on the next hotplug event */
+ if (IS_ERR_OR_NULL(hw->cdev))
+ hw->cdev = NULL;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata lmh_dcvs_cpu_notifier = {
+ .notifier_call = lmh_dcvs_cpu_callback,
+};
+
static int msm_lmh_dcvs_probe(struct platform_device *pdev)
{
int ret;
int affinity = -1;
struct msm_lmh_dcvs_hw *hw;
struct thermal_zone_device *tzdev;
- struct thermal_cooling_device *cdev;
struct device_node *dn = pdev->dev.of_node;
struct device_node *cpu_node, *lmh_node;
uint32_t id, max_freq, request_reg, clear_reg;
@@ -458,10 +484,6 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
if (IS_ERR_OR_NULL(tzdev))
return PTR_ERR(tzdev);
- /* Setup cooling devices to request mitigation states */
- cdev = cpufreq_platform_cooling_register(&hw->core_map, &cd_ops);
- if (IS_ERR_OR_NULL(cdev))
- return PTR_ERR(cdev);
/*
* Driver defaults to for low and hi thresholds.
* Since we make a check for hi > lo value, set the hi threshold
@@ -531,9 +553,16 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
return ret;
}
+ if (list_empty(&lmh_dcvs_hw_list))
+ register_cpu_notifier(&lmh_dcvs_cpu_notifier);
+
INIT_LIST_HEAD(&hw->list);
list_add(&hw->list, &lmh_dcvs_hw_list);
+ /* Register the cooling device explicitly for the first CPU of this HW */
+ lmh_dcvs_cpu_callback(&lmh_dcvs_cpu_notifier, CPU_ONLINE,
+ (void *)(long)cpumask_first(&hw->core_map));
+
return ret;
}
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 89474399ab89..1d5a9e5fb069 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -117,7 +117,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
(!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
- rs485->flags &= SER_RS485_ENABLED;
+ rs485->flags &= ~SER_RS485_ENABLED;
else
config |= RS485_URA;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index cf3da51a3536..7025f47fa284 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5797,6 +5797,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
+ /* Amazon PCI serial device */
+ { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 56ccbcefdd85..d42d66b72d5a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2223,8 +2223,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
serial_dl_write(up, quot);
/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
- if (up->port.type == PORT_XR17V35X)
+ if (up->port.type == PORT_XR17V35X) {
+ /* Preserve the non-baudrate-related bits, DLD[7:4]. */
+ quot_frac |= serial_port_in(port, 0x2) & 0xf0;
serial_port_out(port, 0x2, quot_frac);
+ }
}
static unsigned int
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index c2c9b9361d64..5da2f1406546 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -258,6 +258,7 @@ struct msm_hs_port {
atomic_t client_count;
bool obs; /* out of band sleep flag */
atomic_t client_req_state;
+ int sys_suspend_noirq_cnt;
void *ipc_msm_hs_log_ctxt;
void *ipc_msm_hs_pwr_ctxt;
int ipc_debug_mask;
@@ -395,8 +396,6 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
{
struct uart_port *uport = &(msm_uport->uport);
int rc = atomic_read(&msm_uport->resource_count);
- struct msm_hs_tx *tx = &msm_uport->tx;
- struct msm_hs_rx *rx = &msm_uport->rx;
MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
if (rc <= 0) {
@@ -405,15 +404,8 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
return;
}
atomic_dec(&msm_uport->resource_count);
-
- if (pm_runtime_enabled(uport->dev)) {
- pm_runtime_mark_last_busy(uport->dev);
- pm_runtime_put_autosuspend(uport->dev);
- } else {
- MSM_HS_DBG("%s():tx.flush:%d,in_flight:%d,rx.flush:%d\n",
- __func__, tx->flush, tx->dma_in_flight, rx->flush);
- msm_hs_pm_suspend(uport->dev);
- }
+ pm_runtime_mark_last_busy(uport->dev);
+ pm_runtime_put_autosuspend(uport->dev);
}
/* Vote for resources before accessing them */
@@ -1390,8 +1382,9 @@ static void msm_hs_disconnect_rx(struct uart_port *uport)
if (msm_uport->rx.flush == FLUSH_NONE)
msm_uport->rx.flush = FLUSH_STOP;
- if (sps_is_pipe_empty(sps_pipe_handle, &prod_empty)) {
- MSM_HS_WARN("%s():Pipe Not Empty, ret=%d, flush=%d\n",
+ if (!sps_is_pipe_empty(sps_pipe_handle, &prod_empty)) {
+ if (prod_empty == false)
+ MSM_HS_WARN("%s():Pipe Not Empty, prod=%d, flush=%d\n",
__func__, prod_empty, msm_uport->rx.flush);
}
disconnect_rx_endpoint(msm_uport);
@@ -3295,6 +3288,7 @@ static int msm_hs_pm_resume(struct device *dev)
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s:PM State:Active client_count %d\n", __func__, client_count);
exit_pm_resume:
+ msm_uport->sys_suspend_noirq_cnt = 0;
mutex_unlock(&msm_uport->mtx);
return ret;
}
@@ -3304,14 +3298,11 @@ static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
- enum msm_hs_pm_state prev_pwr_state;
int clk_cnt, client_count, ret = 0;
if (IS_ERR_OR_NULL(msm_uport))
return -ENODEV;
-
mutex_lock(&msm_uport->mtx);
-
/*
* If there is an active clk request or an impending userspace request
* fail the suspend callback.
@@ -3319,19 +3310,40 @@ static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
clk_cnt = atomic_read(&msm_uport->resource_count);
client_count = atomic_read(&msm_uport->client_count);
if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
- MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
- __func__, clk_cnt, client_count);
- ret = -EBUSY;
- goto exit_suspend_noirq;
+ if (clk_cnt == 0 && client_count == 0)
+ msm_uport->sys_suspend_noirq_cnt++;
+ /* Serve a forced suspend once the autosuspend timer has expired
+ */
+ if (msm_uport->sys_suspend_noirq_cnt >= 2) {
+ msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
+ msm_uport->sys_suspend_noirq_cnt = 0;
+ mutex_unlock(&msm_uport->mtx);
+
+ msm_hs_pm_suspend(dev);
+ /*
+ * Synchronize runtime PM with system PM: runtime PM
+ * still believes the device is active. The three
+ * calls below mark the device as suspended in runtime
+ * PM without invoking the suspend callback again.
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ /* Re-take the lock to balance the mutex_unlock() at function exit */
+ mutex_lock(&msm_uport->mtx);
+ } else {
+ ret = -EBUSY;
+ }
}
-
- prev_pwr_state = msm_uport->pm_state;
- msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
- LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
- "%s:PM State:Sys-Suspended client_count %d\n", __func__,
- client_count);
-exit_suspend_noirq:
mutex_unlock(&msm_uport->mtx);
+ if (ret)
+ MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
+ __func__, clk_cnt, client_count);
+ else
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:PM State:Sys-Suspended client_count %d\n",
+ __func__, client_count);
return ret;
};
@@ -3817,11 +3829,10 @@ static void msm_hs_shutdown(struct uart_port *uport)
if (atomic_read(&msm_uport->client_count)) {
MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
atomic_set(&msm_uport->client_count, 0);
- LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
- "%s: Client_Count 0\n", __func__);
}
msm_hs_unconfig_uart_gpios(uport);
- MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:UART port closed, Client_Count 0\n", __func__);
}
static void __exit msm_serial_hs_exit(void)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index de1c143b475f..21fc9b3a27cf 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
up->efr |= UART_EFR_RTS;
else
- up->efr &= UART_EFR_RTS;
+ up->efr &= ~UART_EFR_RTS;
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, lcr);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 235e150d7b81..80d0ffe7abc1 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -163,18 +163,17 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
},
/*
- * Common definitions for legacy IrDA ports, dependent on
- * regshift value.
+ * Common definitions for legacy IrDA ports.
*/
[SCIx_IRDA_REGTYPE] = {
[SCSMR] = { 0x00, 8 },
- [SCBRR] = { 0x01, 8 },
- [SCSCR] = { 0x02, 8 },
- [SCxTDR] = { 0x03, 8 },
- [SCxSR] = { 0x04, 8 },
- [SCxRDR] = { 0x05, 8 },
- [SCFCR] = { 0x06, 8 },
- [SCFDR] = { 0x07, 16 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 8 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0a, 8 },
+ [SCFCR] = { 0x0c, 8 },
+ [SCFDR] = { 0x0e, 16 },
[SCTFDR] = sci_reg_invalid,
[SCRFDR] = sci_reg_invalid,
[SCSPTR] = sci_reg_invalid,
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 1ca9cea2eaf8..64dc549276af 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -244,8 +244,10 @@ static void sysrq_handle_showallcpus(int key)
* architecture has no support for it:
*/
if (!trigger_all_cpu_backtrace()) {
- struct pt_regs *regs = get_irq_regs();
+ struct pt_regs *regs = NULL;
+ if (in_irq())
+ regs = get_irq_regs();
if (regs) {
pr_info("CPU%d:\n", smp_processor_id());
show_regs(regs);
@@ -264,7 +266,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
static void sysrq_handle_showregs(int key)
{
- struct pt_regs *regs = get_irq_regs();
+ struct pt_regs *regs = NULL;
+
+ if (in_irq())
+ regs = get_irq_regs();
if (regs)
show_regs(regs);
perf_event_print_debug();
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index e8846c91ca71..7fab79f9eb33 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -871,14 +871,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
}
}
+static const __u8 bos_desc_len[256] = {
+ [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
+ [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
+ [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
+ [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
+ [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
+ [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
+};
+
/* Get BOS descriptor set */
int usb_get_bos_descriptor(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
+ struct usb_ssp_cap_descriptor *ssp_cap;
unsigned char *buffer;
- int length, total_len, num, i;
+ int length, total_len, num, i, ssac;
+ __u8 cap_type;
int ret;
bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -931,7 +942,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
dev->bos->desc->bNumDeviceCaps = i;
break;
}
+ cap_type = cap->bDevCapabilityType;
length = cap->bLength;
+ if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
+ dev->bos->desc->bNumDeviceCaps = i;
+ break;
+ }
+
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -939,7 +956,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
continue;
}
- switch (cap->bDevCapabilityType) {
+ switch (cap_type) {
case USB_CAP_TYPE_WIRELESS_USB:
/* Wireless USB cap descriptor is handled by wusb */
break;
@@ -952,13 +969,20 @@ int usb_get_bos_descriptor(struct usb_device *dev)
(struct usb_ss_cap_descriptor *)buffer;
break;
case USB_SSP_CAP_TYPE:
- dev->bos->ssp_cap =
- (struct usb_ssp_cap_descriptor *)buffer;
+ ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+ ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+ USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+ if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+ dev->bos->ssp_cap = ssp_cap;
break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
+ case USB_PTM_CAP_TYPE:
+ dev->bos->ptm_cap =
+ (struct usb_ptm_cap_descriptor *)buffer;
+ break;
case USB_CAP_TYPE_CONFIG_SUMMARY:
/* one such desc per configuration */
if (!dev->bos->num_config_summary_desc)
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 873ba02d59e6..ad2e6d235c30 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -113,42 +113,38 @@ enum snoop_when {
#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
/* Limit on the total amount of memory we can allocate for transfers */
-static unsigned usbfs_memory_mb = 16;
+static u32 usbfs_memory_mb = 16;
module_param(usbfs_memory_mb, uint, 0644);
MODULE_PARM_DESC(usbfs_memory_mb,
"maximum MB allowed for usbfs buffers (0 = no limit)");
/* Hard limit, necessary to avoid arithmetic overflow */
-#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
+#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
-static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
+static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
/* Check whether it's okay to allocate more memory for a transfer */
-static int usbfs_increase_memory_usage(unsigned amount)
+static int usbfs_increase_memory_usage(u64 amount)
{
- unsigned lim;
+ u64 lim;
- /*
- * Convert usbfs_memory_mb to bytes, avoiding overflows.
- * 0 means use the hard limit (effectively unlimited).
- */
lim = ACCESS_ONCE(usbfs_memory_mb);
- if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
- lim = USBFS_XFER_MAX;
- else
- lim <<= 20;
+ lim <<= 20;
- atomic_add(amount, &usbfs_memory_usage);
- if (atomic_read(&usbfs_memory_usage) <= lim)
- return 0;
- atomic_sub(amount, &usbfs_memory_usage);
- return -ENOMEM;
+ atomic64_add(amount, &usbfs_memory_usage);
+
+ if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
+ atomic64_sub(amount, &usbfs_memory_usage);
+ return -ENOMEM;
+ }
+
+ return 0;
}
/* Memory for a transfer is being deallocated */
-static void usbfs_decrease_memory_usage(unsigned amount)
+static void usbfs_decrease_memory_usage(u64 amount)
{
- atomic_sub(amount, &usbfs_memory_usage);
+ atomic64_sub(amount, &usbfs_memory_usage);
}
static int connected(struct usb_dev_state *ps)
@@ -1077,7 +1073,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
return -EINVAL;
len1 = bulk.len;
- if (len1 >= USBFS_XFER_MAX)
+ if (len1 >= (INT_MAX - sizeof(struct urb)))
return -EINVAL;
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
@@ -1297,13 +1293,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
-
- if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
- USBDEVFS_URB_SHORT_NOT_OK |
+ unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
USBDEVFS_URB_ZERO_PACKET |
- USBDEVFS_URB_NO_INTERRUPT))
+ USBDEVFS_URB_NO_INTERRUPT;
+ /* USBDEVFS_URB_ISO_ASAP is a special case */
+ if (uurb->type == USBDEVFS_URB_TYPE_ISO)
+ mask |= USBDEVFS_URB_ISO_ASAP;
+
+ if (uurb->flags & ~mask)
+ return -EINVAL;
+
+ if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
return -EINVAL;
if (uurb->buffer_length > 0 && !uurb->buffer)
return -EINVAL;
@@ -1424,10 +1426,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
return -EINVAL;
}
- if (uurb->buffer_length >= USBFS_XFER_MAX) {
- ret = -EINVAL;
- goto error;
- }
if (uurb->buffer_length > 0 &&
!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
uurb->buffer, uurb->buffer_length)) {
@@ -1653,6 +1651,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
return 0;
}
+static void compute_isochronous_actual_length(struct urb *urb)
+{
+ unsigned int i;
+
+ if (urb->number_of_packets > 0) {
+ urb->actual_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++)
+ urb->actual_length +=
+ urb->iso_frame_desc[i].actual_length;
+ }
+}
+
static int processcompl(struct async *as, void __user * __user *arg)
{
struct urb *urb = as->urb;
@@ -1660,6 +1670,7 @@ static int processcompl(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
+ compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
goto err_out;
@@ -1829,6 +1840,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
+ compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
return -EFAULT;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1fb9191b8542..592f45e6dbac 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -3057,6 +3057,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
}
usb_put_invalidate_rhdev(hcd);
+ hcd->flags = 0;
}
EXPORT_SYMBOL_GPL(usb_remove_hcd);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5644051b4010..5df314dd5f3c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4877,6 +4877,15 @@ loop:
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
+
+ /* When halfway through our retry count, power-cycle the port */
+ if (i == (SET_CONFIG_TRIES / 2) - 1) {
+ dev_info(&port_dev->dev, "attempt power cycle\n");
+ usb_hub_set_port_power(hdev, hub, port1, false);
+ msleep(2 * hub_power_on_good_delay(hub));
+ usb_hub_set_port_power(hdev, hub, port1, true);
+ msleep(hub_power_on_good_delay(hub));
+ }
}
if (hub->hdev->parent ||
!hcd->driver->port_handed_over ||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a6aaf2f193a4..50010282c010 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+ { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
@@ -221,6 +224,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Corsair K70 LUX */
+ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 26422a659bfc..0c992cfb3afa 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3720,7 +3720,10 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
psy_type = get_psy_type(mdwc);
if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
- pval.intval = -ETIMEDOUT;
+ if (!mA)
+ pval.intval = -ETIMEDOUT;
+ else
+ pval.intval = 1000 * mA;
goto set_prop;
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a1e43d6f7ebb..7d7197e2cfc4 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -924,7 +924,7 @@ retry:
}
if (io_data->aio) {
- req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
+ req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
if (unlikely(!req))
goto error_lock;
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 14490ab296c2..829e1fcbe156 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -565,7 +565,7 @@ static struct usb_endpoint_descriptor rndis_gsi_fs_out_desc = {
};
static struct usb_descriptor_header *gsi_eth_fs_function[] = {
- (struct usb_descriptor_header *) &gsi_eth_fs_function,
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_gsi_control_intf,
(struct usb_descriptor_header *) &rndis_gsi_header_desc,
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index a28bcd084dc3..8a2346ce6b42 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -106,6 +106,7 @@ struct f_rndis_qc {
u8 port_num;
u16 cdc_filter;
bool net_ready_trigger;
+ bool use_wceis;
};
static struct ipa_usb_init_params rndis_ipa_params;
@@ -161,9 +162,9 @@ static struct usb_interface_descriptor rndis_qc_control_intf = {
/* .bInterfaceNumber = DYNAMIC */
/* status endpoint is optional; this could be patched later */
.bNumEndpoints = 1,
- .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER,
- .bInterfaceSubClass = 0x01,
- .bInterfaceProtocol = 0x03,
+ .bInterfaceClass = USB_CLASS_MISC,
+ .bInterfaceSubClass = 0x04,
+ .bInterfaceProtocol = 0x01, /* RNDIS over ethernet */
/* .iInterface = DYNAMIC */
};
@@ -222,9 +223,9 @@ rndis_qc_iad_descriptor = {
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
.bFirstInterface = 0, /* XXX, hardcoded */
.bInterfaceCount = 2, /* control + data */
- .bFunctionClass = USB_CLASS_WIRELESS_CONTROLLER,
- .bFunctionSubClass = 0x01,
- .bFunctionProtocol = 0x03,
+ .bFunctionClass = USB_CLASS_MISC,
+ .bFunctionSubClass = 0x04,
+ .bFunctionProtocol = 0x01, /* RNDIS over ethernet */
/* .iFunction = DYNAMIC */
};
@@ -935,6 +936,17 @@ rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
rndis_qc_iad_descriptor.iFunction = status;
}
+ if (rndis->use_wceis) {
+ rndis_qc_iad_descriptor.bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_qc_iad_descriptor.bFunctionSubClass = 0x01;
+ rndis_qc_iad_descriptor.bFunctionProtocol = 0x03;
+ rndis_qc_control_intf.bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_qc_control_intf.bInterfaceSubClass = 0x1;
+ rndis_qc_control_intf.bInterfaceProtocol = 0x03;
+ }
+
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
@@ -1470,8 +1482,38 @@ static struct configfs_item_operations qcrndis_item_ops = {
.release = qcrndis_attr_release,
};
+
+static ssize_t qcrndis_wceis_show(struct config_item *item, char *page)
+{
+ struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis;
+
+ return snprintf(page, PAGE_SIZE, "%d\n", rndis->use_wceis);
+}
+
+static ssize_t qcrndis_wceis_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_rndis_qc *rndis = to_f_qc_rndis_opts(item)->rndis;
+ bool val;
+
+ if (kstrtobool(page, &val))
+ return -EINVAL;
+
+ rndis->use_wceis = val;
+
+ return len;
+}
+
+CONFIGFS_ATTR(qcrndis_, wceis);
+
+static struct configfs_attribute *qcrndis_attrs[] = {
+ &qcrndis_attr_wceis,
+ NULL,
+};
+
static struct config_item_type qcrndis_func_type = {
.ct_item_ops = &qcrndis_item_ops,
+ .ct_attrs = qcrndis_attrs,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 8d22fda48618..c1c14d818b5c 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -851,7 +851,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
default: /* unknown */
break;
}
- temp = (cap >> 8) & 0xff;
+ offset = (cap >> 8) & 0xff;
}
}
#endif
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fae1222d4bc8..250a4449ac51 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -397,25 +397,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_free_command(xhci, cmd);
- return -ENOMEM;
-
+ ret = -ENOMEM;
+ goto cmd_cleanup;
}
ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
- i, suspend);
+ i, suspend);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, command);
- goto err_cmd_queue;
+ goto cmd_cleanup;
}
}
}
ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- goto err_cmd_queue;
+ goto cmd_cleanup;
}
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -427,7 +427,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
ret = -ETIME;
}
-err_cmd_queue:
+cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 35e0c046fdcc..9daa5b196bc7 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -981,6 +981,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
if (!vdev)
return;
+ if (vdev->real_port == 0 ||
+ vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+ xhci_dbg(xhci, "Bad vdev->real_port.\n");
+ goto out;
+ }
+
tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
@@ -994,6 +1000,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
}
}
}
+out:
/* we are now at a leaf device */
xhci_free_virt_device(xhci, slot_id);
}
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 2e947dc94e32..bc92a498ec03 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -185,12 +185,13 @@ found:
return tmp;
}
- if (in) {
+ if (in)
dev->in_pipe = usb_rcvbulkpipe(udev,
in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ if (out)
dev->out_pipe = usb_sndbulkpipe(udev,
out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- }
+
if (iso_in) {
dev->iso_in = &iso_in->desc;
dev->in_iso_pipe = usb_rcvisocpipe(udev,
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 3c0386ee5875..77a1627ac5f2 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -817,7 +817,7 @@ static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
/* allocate new message if first chunk */
rx_msg = kzalloc(sizeof(*rx_msg) +
PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!rx_msg)
return NULL;
@@ -870,7 +870,7 @@ static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
pd->rx_ext_msg = rx_msg;
- req = kzalloc(sizeof(*req), GFP_KERNEL);
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
goto queue_rx; /* return what we have anyway */
@@ -944,7 +944,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
PD_MSG_HDR_TYPE(header), PD_MSG_HDR_COUNT(header));
if (!PD_MSG_HDR_IS_EXTENDED(header)) {
- rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_KERNEL);
+ rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_ATOMIC);
if (!rx_msg)
return;
@@ -1140,14 +1140,13 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
case PE_SRC_READY:
pd->in_explicit_contract = true;
- if (pd->current_dr == DR_DFP) {
- /* don't start USB host until after SVDM discovery */
- if (pd->vdm_state == VDM_NONE)
- usbpd_send_svdm(pd, USBPD_SID,
- USBPD_SVDM_DISCOVER_IDENTITY,
- SVDM_CMD_TYPE_INITIATOR, 0,
- NULL, 0);
- }
+
+ if (pd->vdm_tx)
+ kick_sm(pd, 0);
+ else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
+ usbpd_send_svdm(pd, USBPD_SID,
+ USBPD_SVDM_DISCOVER_IDENTITY,
+ SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
complete(&pd->is_ready);
@@ -1282,6 +1281,14 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
case PE_SNK_READY:
pd->in_explicit_contract = true;
+
+ if (pd->vdm_tx)
+ kick_sm(pd, 0);
+ else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
+ usbpd_send_svdm(pd, USBPD_SID,
+ USBPD_SVDM_DISCOVER_IDENTITY,
+ SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
complete(&pd->is_ready);
dual_role_instance_changed(pd->dual_role);
@@ -1967,6 +1974,10 @@ static void usbpd_sm(struct work_struct *w)
switch (pd->current_state) {
case PE_UNKNOWN:
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
if (pd->current_pr == PR_SINK) {
usbpd_set_state(pd, PE_SNK_STARTUP);
} else if (pd->current_pr == PR_SRC) {
@@ -2133,8 +2144,11 @@ static void usbpd_sm(struct work_struct *w)
case PE_SRC_TRANSITION_TO_DEFAULT:
if (pd->vconn_enabled)
regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
+
if (pd->vbus_enabled)
regulator_disable(pd->vbus);
+ pd->vbus_enabled = false;
if (pd->current_dr != DR_DFP) {
extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
@@ -2142,24 +2156,9 @@ static void usbpd_sm(struct work_struct *w)
pd_phy_update_roles(pd->current_dr, pd->current_pr);
}
- msleep(SRC_RECOVER_TIME);
-
- pd->vbus_enabled = false;
- enable_vbus(pd);
-
- if (pd->vconn_enabled) {
- ret = regulator_enable(pd->vconn);
- if (ret) {
- usbpd_err(&pd->dev, "Unable to enable vconn\n");
- pd->vconn_enabled = false;
- }
- }
-
- val.intval = 0;
- power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
-
- usbpd_set_state(pd, PE_SRC_STARTUP);
+ /* PE_UNKNOWN will turn on VBUS and go back to PE_SRC_STARTUP */
+ pd->current_state = PE_UNKNOWN;
+ kick_sm(pd, SRC_RECOVER_TIME);
break;
case PE_SRC_HARD_RESET:
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index b454442e2471..c3e9f2394b24 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -112,7 +112,6 @@ struct usb_pdphy {
int tx_status;
u8 frame_filter_val;
bool in_test_data_mode;
- bool rx_busy;
enum data_role data_role;
enum power_role power_role;
@@ -490,7 +489,7 @@ int pd_phy_write(u16 hdr, const u8 *data, size_t data_len, enum pd_sop_type sop)
}
ret = pdphy_reg_read(pdphy, &val, USB_PDPHY_RX_ACKNOWLEDGE, 1);
- if (ret || val || pdphy->rx_busy) {
+ if (ret || val) {
dev_err(pdphy->dev, "%s: RX message pending\n", __func__);
return -EBUSY;
}
@@ -670,15 +669,6 @@ static int pd_phy_bist_mode(u8 bist_mode)
static irqreturn_t pdphy_msg_rx_irq(int irq, void *data)
{
- struct usb_pdphy *pdphy = data;
-
- pdphy->rx_busy = true;
-
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t pdphy_msg_rx_irq_thread(int irq, void *data)
-{
u8 size, rx_status, frame_type;
u8 buf[32];
int ret;
@@ -733,7 +723,6 @@ static irqreturn_t pdphy_msg_rx_irq_thread(int irq, void *data)
false);
pdphy->rx_bytes += size + 1;
done:
- pdphy->rx_busy = false;
return IRQ_HANDLED;
}
@@ -820,7 +809,7 @@ static int pdphy_probe(struct platform_device *pdev)
ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
&pdphy->msg_rx_irq, "msg-rx", pdphy_msg_rx_irq,
- pdphy_msg_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
if (ret < 0)
return ret;
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index ab5d364f6e8c..335a1ef35224 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
if (IS_ERR(tu->extcon)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
- return -ENOMEM;
+ ret = PTR_ERR(tu->extcon);
+ goto err_disable_clk;
}
ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 37d0e8cc7af6..2220c1b9df10 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -138,6 +138,7 @@ struct garmin_data {
__u8 privpkt[4*6];
spinlock_t lock;
struct list_head pktlist;
+ struct usb_anchor write_urbs;
};
@@ -906,7 +907,7 @@ static int garmin_init_session(struct usb_serial_port *port)
sizeof(GARMIN_START_SESSION_REQ), 0);
if (status < 0)
- break;
+ goto err_kill_urbs;
}
if (status > 0)
@@ -914,6 +915,12 @@ static int garmin_init_session(struct usb_serial_port *port)
}
return status;
+
+err_kill_urbs:
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
+ usb_kill_urb(port->interrupt_in_urb);
+
+ return status;
}
@@ -931,7 +938,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* shutdown any bulk reads that might be going on */
- usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
if (garmin_data_p->state == STATE_RESET)
@@ -954,7 +960,7 @@ static void garmin_close(struct usb_serial_port *port)
/* shutdown our urbs */
usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
/* keep reset state so we know that we must start a new session */
if (garmin_data_p->state != STATE_RESET)
@@ -1038,12 +1044,14 @@ static int garmin_write_bulk(struct usb_serial_port *port,
}
/* send it down the pipe */
+ usb_anchor_urb(urb, &garmin_data_p->write_urbs);
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
+ usb_unanchor_urb(urb);
kfree(buffer);
}
@@ -1402,9 +1410,16 @@ static int garmin_port_probe(struct usb_serial_port *port)
garmin_data_p->state = 0;
garmin_data_p->flags = 0;
garmin_data_p->count = 0;
+ init_usb_anchor(&garmin_data_p->write_urbs);
usb_set_serial_port_data(port, garmin_data_p);
status = garmin_init_session(port);
+ if (status)
+ goto err_free;
+
+ return 0;
+err_free:
+ kfree(garmin_data_p);
return status;
}
@@ -1414,6 +1429,7 @@ static int garmin_port_remove(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
usb_kill_urb(port->interrupt_in_urb);
del_timer_sync(&garmin_data_p->timer);
kfree(garmin_data_p);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db3d34c2c82e..ffa8ec917ff5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
+#define QUECTEL_PRODUCT_BG96 0x0296
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index e1c1e329c877..4516291df1b8 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -148,6 +148,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
{DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
+ {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index a155cd02bce2..ecc83c405a8b 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
}
}
+ /* All Seagate disk enclosures have broken ATA pass-through support */
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+ flags |= US_FL_NO_ATA_1X;
+
usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS) {
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e4110d6de0b5..da6cc25baaef 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -703,6 +703,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
struct scatterlist *sg, int sg_count)
{
size_t off = iter->iov_offset;
+ struct scatterlist *p = sg;
int i, ret;
for (i = 0; i < iter->nr_segs; i++) {
@@ -711,8 +712,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
if (ret < 0) {
- for (i = 0; i < sg_count; i++) {
- struct page *page = sg_page(&sg[i]);
+ while (p < sg) {
+ struct page *page = sg_page(p++);
if (page)
put_page(page);
}
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index dd88ba1d71ce..35373e2065b2 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -332,10 +332,18 @@ static int adp5520_bl_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, bl);
- ret |= adp5520_bl_setup(bl);
+ ret = adp5520_bl_setup(bl);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup\n");
+ if (data->pdata->en_ambl_sens)
+ sysfs_remove_group(&bl->dev.kobj,
+ &adp5520_bl_attr_group);
+ return ret;
+ }
+
backlight_update_status(bl);
- return ret;
+ return 0;
}
static int adp5520_bl_remove(struct platform_device *pdev)
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 7de847df224f..4b40c6a4d441 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -226,6 +226,8 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
dev_set_name(&new_ld->dev, "%s", name);
dev_set_drvdata(&new_ld->dev, devdata);
+ new_ld->ops = ops;
+
rc = device_register(&new_ld->dev);
if (rc) {
put_device(&new_ld->dev);
@@ -238,8 +240,6 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
return ERR_PTR(rc);
}
- new_ld->ops = ops;
-
return new_ld;
}
EXPORT_SYMBOL(lcd_device_register);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index bdf6705ef597..335614a33aaf 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -2458,10 +2458,21 @@ static int mdss_mdp_video_early_wake_up(struct mdss_mdp_ctl *ctl)
* lot of latency rendering the input events useless in preventing the
* idle time out.
*/
- if (ctl->mfd->idle_state == MDSS_FB_IDLE_TIMER_RUNNING) {
- if (ctl->mfd->idle_time)
+ if ((ctl->mfd->idle_state == MDSS_FB_IDLE_TIMER_RUNNING) ||
+ (ctl->mfd->idle_state == MDSS_FB_IDLE)) {
+ /*
+ * Modify the idle time so that an idle fallback can be
+ * triggered for those cases, where we have no update
+ * despite of a touch event and idle time is 0.
+ */
+ if (!ctl->mfd->idle_time) {
+ ctl->mfd->idle_time = 70;
+ schedule_delayed_work(&ctl->mfd->idle_notify_work,
+ msecs_to_jiffies(200));
+ } else {
mod_delayed_work(system_wq, &ctl->mfd->idle_notify_work,
msecs_to_jiffies(ctl->mfd->idle_time));
+ }
pr_debug("Delayed idle time\n");
} else {
pr_debug("Nothing to done for this state (%d)\n",
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 914a52ba8477..77837665ce89 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -129,7 +129,7 @@ static struct fb_ops pmagbafb_ops = {
/*
* Turn the hardware cursor off.
*/
-static void __init pmagbafb_erase_cursor(struct fb_info *info)
+static void pmagbafb_erase_cursor(struct fb_info *info)
{
struct pmagbafb_par *par = info->par;
diff --git a/drivers/video/msm/ba/msm_ba.c b/drivers/video/msm/ba/msm_ba.c
index 4200b8f20073..566cb634ae8f 100644
--- a/drivers/video/msm/ba/msm_ba.c
+++ b/drivers/video/msm/ba/msm_ba.c
@@ -21,6 +21,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/msm_ba.h>
+#include <media/adv7481.h>
#include "msm_ba_internal.h"
#include "msm_ba_debug.h"
@@ -555,6 +556,24 @@ long msm_ba_private_ioctl(void *instance, int cmd, void *arg)
}
}
break;
+ case VIDIOC_G_CSI_PARAMS: {
+ dprintk(BA_DBG, "VIDIOC_G_CSI_PARAMS");
+ sd = inst->sd;
+ if (!sd) {
+ dprintk(BA_ERR, "No sd registered");
+ return -EINVAL;
+ }
+ if (arg) {
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ if (rc)
+ dprintk(BA_ERR, "%s failed: %ld on cmd: 0x%x",
+ __func__, rc, cmd);
+ } else {
+ dprintk(BA_ERR, "%s: NULL argument provided", __func__);
+ rc = -EINVAL;
+ }
+ }
+ break;
default:
dprintk(BA_WARN, "Not a typewriter! Command: 0x%x", cmd);
rc = -ENOTTY;
diff --git a/drivers/video/msm/ba/msm_ba_common.c b/drivers/video/msm/ba/msm_ba_common.c
index 1306fca46652..e70c264b9765 100644
--- a/drivers/video/msm/ba/msm_ba_common.c
+++ b/drivers/video/msm/ba/msm_ba_common.c
@@ -191,8 +191,6 @@ void msm_ba_add_inputs(struct v4l2_subdev *sd)
int dev_id = 0;
dev_ctxt = get_ba_dev();
- if (!list_empty(&dev_ctxt->inputs))
- start_index = dev_ctxt->num_inputs;
msm_ba_inp_cfg = dev_ctxt->msm_ba_inp_cfg;
dev_id = msm_ba_inp_cfg[start_index].ba_out;
diff --git a/drivers/video/msm/ba/msm_ba_internal.h b/drivers/video/msm/ba/msm_ba_internal.h
index bd52e8e400ce..bb90a3198728 100644
--- a/drivers/video/msm/ba/msm_ba_internal.h
+++ b/drivers/video/msm/ba/msm_ba_internal.h
@@ -106,6 +106,7 @@ enum msm_ba_ip_type {
BA_INPUT_MHL,
BA_INPUT_DVI,
BA_INPUT_TTL,
+ BA_INPUT_TV_TUNER,
BA_INPUT_MAX = 0xffffffff
};
diff --git a/drivers/video/msm/ba/msm_v4l2_ba.c b/drivers/video/msm/ba/msm_v4l2_ba.c
index 89fc08dd3c33..c50d02292398 100644
--- a/drivers/video/msm/ba/msm_v4l2_ba.c
+++ b/drivers/video/msm/ba/msm_v4l2_ba.c
@@ -227,6 +227,14 @@ static int msm_ba_v4l2_g_parm(struct file *file, void *fh,
return 0;
}
+static long msm_ba_v4l2_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct msm_ba_inst *ba_inst = get_ba_inst(file, fh);
+
+ return msm_ba_private_ioctl((void *)ba_inst, cmd, (void *)arg);
+}
+
static const struct v4l2_ioctl_ops msm_ba_v4l2_ioctl_ops = {
.vidioc_querycap = msm_ba_v4l2_querycap,
.vidioc_enum_fmt_vid_cap = msm_ba_v4l2_enum_fmt,
@@ -250,6 +258,7 @@ static const struct v4l2_ioctl_ops msm_ba_v4l2_ioctl_ops = {
.vidioc_enum_output = msm_ba_v4l2_enum_output,
.vidioc_g_output = msm_ba_v4l2_g_output,
.vidioc_s_output = msm_ba_v4l2_s_output,
+ .vidioc_default = msm_ba_v4l2_private_ioctl,
};
static unsigned int msm_ba_v4l2_poll(struct file *filp,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 308600adf6e0..a4d749665c9f 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -827,6 +827,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
mutex_unlock(&priv->lock);
if (use_ptemod) {
+ map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -864,7 +865,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
set_grant_ptes_as_special, NULL);
}
#endif
- map->pages_vm_start = vma->vm_start;
}
return 0;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index e12bd3635f83..2dd285827169 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -275,8 +275,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
err = xenbus_transaction_start(&xbt);
if (err)
return;
- if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
- pr_err("Unable to read sysrq code in control/sysrq\n");
+ err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
+ if (err < 0) {
+ /*
+ * The Xenstore watch fires directly after registering it and
+ * after a suspend/resume cycle. So ENOENT is no error but
+ * might happen in those cases.
+ */
+ if (err != -ENOENT)
+ pr_err("Error %d reading sysrq code in control/sysrq\n",
+ err);
xenbus_transaction_end(xbt, 1);
return;
}
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0e0eb10f82a0..816a0e08ef10 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
- } else if (msg_type == XS_TRANSACTION_END) {
+ } else if (u->u.msg.tx_id != 0) {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 511078586fa1..73f1d1b3a51c 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
if (v9inode->qid.type != st->qid.type)
return 0;
+
+ if (v9inode->qid.path != st->qid.path)
+ return 0;
return 1;
}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index cb899af1babc..0b88744c6446 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
if (v9inode->qid.type != st->qid.type)
return 0;
+
+ if (v9inode->qid.path != st->qid.path)
+ return 0;
return 1;
}
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 35b755e79c2d..fe6e7050fe50 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -87,7 +87,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
- return (bytes > 0);
+ /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
+ return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
}
static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
@@ -101,6 +102,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
} pkt;
struct file *pipe = NULL;
size_t pktsz;
+ int ret;
DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d",
(unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type);
@@ -173,7 +175,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
mutex_unlock(&sbi->wq_mutex);
if (autofs4_write(sbi, pipe, &pkt, pktsz))
+ switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
+ case 0:
+ break;
+ case -ENOMEM:
+ case -ERESTARTSYS:
+ /* Just fail this one */
+ autofs4_wait_release(sbi, wq->wait_queue_token, ret);
+ break;
+ default:
autofs4_catatonic_mode(sbi);
+ break;
+ }
fput(pipe);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c36a03fa7678..260f94b019c9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3361,13 +3361,6 @@ again:
goto again;
}
- /* We've already setup this transaction, go ahead and exit */
- if (block_group->cache_generation == trans->transid &&
- i_size_read(inode)) {
- dcs = BTRFS_DC_SETUP;
- goto out_put;
- }
-
/*
* We want to set the generation to 0, that way if anything goes wrong
* from here on out we know not to trust this cache when we load up next
@@ -3391,6 +3384,13 @@ again:
}
WARN_ON(ret);
+ /* We've already setup this transaction, go ahead and exit */
+ if (block_group->cache_generation == trans->transid &&
+ i_size_read(inode)) {
+ dcs = BTRFS_DC_SETUP;
+ goto out_put;
+ }
+
if (i_size_read(inode) > 0) {
ret = btrfs_check_trunc_cache_free_space(root,
&root->fs_info->global_block_rsv);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 778282944530..837a9a8d579e 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -348,7 +348,5 @@ skip:
out:
btrfs_free_path(path);
- if (ret)
- btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
- return 0;
+ return ret;
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c69e1253b47b..0e3de1bb6500 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1850,6 +1850,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+ spin_unlock(&ci->i_ceph_lock);
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
}
@@ -1867,8 +1868,10 @@ retry:
mutex_lock(&session->s_mutex);
goto retry;
}
- if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+ if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+ spin_unlock(&ci->i_ceph_lock);
goto out;
+ }
flushing = __mark_caps_flushing(inode, session, &flush_tid,
&oldest_flush_tid);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 297e05c9e2b0..49a0d6b027c1 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -193,7 +193,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i;
- if (unlikely(direntry->d_name.len >
+ if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
+ direntry->d_name.len >
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
@@ -509,7 +510,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
rc = check_name(direntry, tcon);
if (rc)
- goto out_free_xid;
+ goto out;
server = tcon->ses->server;
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index f6c6c8adbc01..7289f0a7670b 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -446,8 +446,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid)
UPARG(CODA_FSYNC);
inp->coda_fsync.VFid = *fid;
- error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs),
- &outsize, inp);
+ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
CODA_FREE(inp, insize);
return error;
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 89e7aa5f178a..f5908e91eb17 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
static inline struct ecryptfs_auth_tok *
ecryptfs_get_encrypted_key_payload_data(struct key *key)
{
- if (key->type == &key_type_encrypted)
- return (struct ecryptfs_auth_tok *)
- (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
- else
+ struct encrypted_key_payload *payload;
+
+ if (key->type != &key_type_encrypted)
return NULL;
+
+ payload = key->payload.data[0];
+ if (!payload)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)payload->payload_data;
}
static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
ecryptfs_get_key_payload_data(struct key *key)
{
struct ecryptfs_auth_tok *auth_tok;
+ const struct user_key_payload *ukp;
auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
- if (!auth_tok)
- return (struct ecryptfs_auth_tok *)user_key_payload(key)->data;
- else
+ if (auth_tok)
return auth_tok;
+
+ ukp = user_key_payload(key);
+ if (!ukp)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)ukp->data;
}
#define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 8319b776a461..ea3d99ebb6ee 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -461,7 +461,8 @@ out:
* @auth_tok_key: key containing the authentication token
* @auth_tok: authentication token
*
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
*/
static int
ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -470,6 +471,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
int rc = 0;
(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+ if (IS_ERR(*auth_tok)) {
+ rc = PTR_ERR(*auth_tok);
+ *auth_tok = NULL;
+ goto out;
+ }
+
if (ecryptfs_verify_version((*auth_tok)->version)) {
printk(KERN_ERR "Data structure version mismatch. Userspace "
"tools must match eCryptfs kernel module with major "
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 286f10b0363b..4f457d5c4933 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void)
}
if (ecryptfs_daemon_hash) {
struct ecryptfs_daemon *daemon;
+ struct hlist_node *n;
int i;
mutex_lock(&ecryptfs_daemon_hash_mux);
for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
int rc;
- hlist_for_each_entry(daemon,
- &ecryptfs_daemon_hash[i],
- euid_chain) {
+ hlist_for_each_entry_safe(daemon, n,
+ &ecryptfs_daemon_hash[i],
+ euid_chain) {
rc = ecryptfs_exorcise_daemon(daemon);
if (rc)
printk(KERN_ERR "%s: Error whilst "
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index 15ebac242288..d3d6b28ce9b9 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -220,11 +220,9 @@ int _ext4_get_encryption_info(struct inode *inode)
int mode;
int res;
- if (!ext4_read_workqueue) {
- res = ext4_init_crypto();
- if (res)
- return res;
- }
+ res = ext4_init_crypto();
+ if (res)
+ return res;
retry:
crypt_info = ACCESS_ONCE(ei->i_crypt_info);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index fc496c646d12..468e7fe3616c 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4810,7 +4810,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode)) {
+ (offset + len > i_size_read(inode) ||
+ offset + len > EXT4_I(inode)->i_disksize)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
@@ -4986,7 +4987,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode)) {
+ (offset + len > i_size_read(inode) ||
+ offset + len > EXT4_I(inode)->i_disksize)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c2810503eb50..27ff3706d632 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2136,8 +2136,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* We search using buddy data only if the order of the request
* is greater than equal to the sbi_s_mb_order2_reqs
* You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
+ * We also support searching for power-of-two requests only for
+ * requests upto maximum buddy size we have constructed.
*/
- if (i >= sbi->s_mb_order2_reqs) {
+ if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
/*
* This should tell if fe_len is exactly power of 2
*/
@@ -2207,7 +2209,7 @@ repeat:
}
ac->ac_groups_scanned++;
- if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
+ if (cr == 0)
ext4_mb_simple_scan_group(ac, &e4b);
else if (cr == 1 && sbi->s_stripe &&
!(ac->ac_g_ex.fe_len % sbi->s_stripe))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 83a72da67df7..d3cbdbc8ad33 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2498,9 +2498,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
ret = sbi->s_stripe;
- else if (stripe_width <= sbi->s_blocks_per_group)
+ else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
ret = stripe_width;
- else if (stride <= sbi->s_blocks_per_group)
+ else if (stride && stride <= sbi->s_blocks_per_group)
ret = stride;
else
ret = 0;
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 112f8e04c549..3f52efa0f94f 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -253,6 +253,9 @@ static int __f2fs_set_acl(struct inode *inode, int type,
int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ return -EIO;
+
return __f2fs_set_acl(inode, type, acl, NULL);
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index e86f67ac96c6..2eb778174a9b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -29,7 +29,6 @@ struct kmem_cache *inode_entry_slab;
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
set_ckpt_flags(sbi, CP_ERROR_FLAG);
- sbi->sb->s_flags |= MS_RDONLY;
if (!end_io)
f2fs_flush_merged_writes(sbi);
}
@@ -402,24 +401,23 @@ const struct address_space_operations f2fs_meta_aops = {
#endif
};
-static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
+static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
+ unsigned int devidx, int type)
{
struct inode_management *im = &sbi->im[type];
struct ino_entry *e, *tmp;
tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
-retry:
+
radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
spin_lock(&im->ino_lock);
e = radix_tree_lookup(&im->ino_root, ino);
if (!e) {
e = tmp;
- if (radix_tree_insert(&im->ino_root, ino, e)) {
- spin_unlock(&im->ino_lock);
- radix_tree_preload_end();
- goto retry;
- }
+ if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
+ f2fs_bug_on(sbi, 1);
+
memset(e, 0, sizeof(struct ino_entry));
e->ino = ino;
@@ -427,6 +425,10 @@ retry:
if (type != ORPHAN_INO)
im->ino_num++;
}
+
+ if (type == FLUSH_INO)
+ f2fs_set_bit(devidx, (char *)&e->dirty_device);
+
spin_unlock(&im->ino_lock);
radix_tree_preload_end();
@@ -455,7 +457,7 @@ static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
/* add new dirty ino entry into list */
- __add_ino_entry(sbi, ino, type);
+ __add_ino_entry(sbi, ino, 0, type);
}
void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -481,7 +483,7 @@ void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
struct ino_entry *e, *tmp;
int i;
- for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) {
+ for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
struct inode_management *im = &sbi->im[i];
spin_lock(&im->ino_lock);
@@ -495,6 +497,27 @@ void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
}
}
+void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+ unsigned int devidx, int type)
+{
+ __add_ino_entry(sbi, ino, devidx, type);
+}
+
+bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+ unsigned int devidx, int type)
+{
+ struct inode_management *im = &sbi->im[type];
+ struct ino_entry *e;
+ bool is_dirty = false;
+
+ spin_lock(&im->ino_lock);
+ e = radix_tree_lookup(&im->ino_root, ino);
+ if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
+ is_dirty = true;
+ spin_unlock(&im->ino_lock);
+ return is_dirty;
+}
+
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
struct inode_management *im = &sbi->im[ORPHAN_INO];
@@ -531,7 +554,7 @@ void release_orphan_inode(struct f2fs_sb_info *sbi)
void add_orphan_inode(struct inode *inode)
{
/* add new orphan ino entry into list */
- __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+ __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
update_inode_page(inode);
}
@@ -555,7 +578,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
return err;
}
- __add_ino_entry(sbi, ino, ORPHAN_INO);
+ __add_ino_entry(sbi, ino, 0, ORPHAN_INO);
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
@@ -591,6 +614,9 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
block_t start_blk, orphan_blocks, i, j;
unsigned int s_flags = sbi->sb->s_flags;
int err = 0;
+#ifdef CONFIG_QUOTA
+ int quota_enabled;
+#endif
if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
@@ -603,8 +629,9 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
sbi->sb->s_flags |= MS_ACTIVE;
+
/* Turn on quotas so that they are updated correctly */
- f2fs_enable_quota_files(sbi);
+ quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
#endif
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -632,7 +659,8 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
out:
#ifdef CONFIG_QUOTA
/* Turn quotas off */
- f2fs_quota_off_umount(sbi->sb);
+ if (quota_enabled)
+ f2fs_quota_off_umount(sbi->sb);
#endif
sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -987,7 +1015,7 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
update_inode_page(inode);
iput(inode);
}
- };
+ }
return 0;
}
@@ -1147,6 +1175,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct super_block *sb = sbi->sb;
struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
u64 kbytes_written;
+ int err;
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
@@ -1240,6 +1269,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ /* flush all device cache */
+ err = f2fs_flush_device_cache(sbi);
+ if (err)
+ return err;
+
/* write out checkpoint buffer at block 0 */
update_meta_page(sbi, ckpt, start_blk++);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c8583d7a1845..cdccc429325b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -172,7 +172,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
{
struct bio *bio;
- bio = f2fs_bio_alloc(npages);
+ bio = f2fs_bio_alloc(sbi, npages, true);
f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
@@ -417,8 +417,8 @@ next:
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
- /* set submitted = 1 as a return value */
- fio->submitted = 1;
+ /* set submitted = true as a return value */
+ fio->submitted = true;
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
@@ -472,7 +472,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
f2fs_wait_on_block_writeback(sbi, blkaddr);
}
- bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
if (!bio) {
if (ctx)
fscrypt_release_ctx(ctx);
@@ -832,6 +832,13 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
struct f2fs_map_blocks map;
int err = 0;
+ /* convert inline data for Direct I/O*/
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
+
if (is_inode_flag_set(inode, FI_NO_PREALLOC))
return 0;
@@ -844,15 +851,11 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
map.m_next_pgofs = NULL;
- if (iocb->ki_flags & IOCB_DIRECT) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
+ if (iocb->ki_flags & IOCB_DIRECT)
return f2fs_map_blocks(inode, &map, 1,
__force_buffered_io(inode, WRITE) ?
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO);
- }
if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
@@ -1332,7 +1335,7 @@ static int f2fs_read_data_pages(struct file *file,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
- struct inode *inode = file->f_mapping->host;
+ struct inode *inode = mapping->host;
struct page *page = list_last_entry(pages, struct page, lru);
trace_f2fs_readpages(inode, page, nr_pages);
@@ -1493,6 +1496,7 @@ static int __write_data_page(struct page *page, bool *submitted,
int err = 0;
struct f2fs_io_info fio = {
.sbi = sbi,
+ .ino = inode->i_ino,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
@@ -1564,8 +1568,11 @@ write:
err = do_write_data_page(&fio);
}
}
+
+ down_write(&F2FS_I(inode)->i_sem);
if (F2FS_I(inode)->last_disk_size < psize)
F2FS_I(inode)->last_disk_size = psize;
+ up_write(&F2FS_I(inode)->i_sem);
done:
if (err && err != -ENOENT)
@@ -1945,6 +1952,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
}
trace_f2fs_write_begin(inode, pos, len, flags);
+ if (f2fs_is_atomic_file(inode) &&
+ !available_free_memory(sbi, INMEM_PAGES)) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
@@ -1960,7 +1973,8 @@ repeat:
* Do not use grab_cache_page_write_begin() to avoid deadlock due to
* wait_for_stable_page. Will wait that below with our IO control.
*/
- page = grab_cache_page(mapping, index);
+ page = f2fs_pagecache_get_page(mapping, index,
+ FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
if (!page) {
err = -ENOMEM;
goto fail;
@@ -2021,6 +2035,8 @@ repeat:
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
+ if (f2fs_is_atomic_file(inode))
+ drop_inmem_pages_all(sbi);
return err;
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 87f449845f5f..ecada8425268 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -45,9 +45,18 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
+ si->ndirty_qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+
+ si->nquota_files = 0;
+ if (f2fs_sb_has_quota_ino(sbi->sb)) {
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (f2fs_qf_ino(sbi->sb, i))
+ si->nquota_files++;
+ }
+ }
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->aw_cnt = atomic_read(&sbi->aw_cnt);
@@ -61,6 +70,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
si->nr_flushing =
atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
+ si->flush_list_empty =
+ llist_empty(&SM_I(sbi)->fcc_info->issue_list);
}
if (SM_I(sbi) && SM_I(sbi)->dcc_info) {
si->nr_discarded =
@@ -96,9 +107,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
- si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+ si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
si->avail_nids = NM_I(sbi)->available_nids;
- si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
+ si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
@@ -231,14 +242,14 @@ get_cache:
}
/* free nids */
- si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
- NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+ si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
+ NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
sizeof(struct free_nid);
si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
sizeof(struct nat_entry_set);
si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
- for (i = 0; i <= ORPHAN_INO; i++)
+ for (i = 0; i < MAX_INO_ENTRY; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
si->cache_mem += atomic_read(&sbi->total_ext_tree) *
sizeof(struct extent_tree);
@@ -262,9 +273,10 @@ static int stat_show(struct seq_file *s, void *v)
list_for_each_entry(si, &f2fs_stat_list, stat_list) {
update_general_status(si->sbi);
- seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n",
+ seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n",
si->sbi->sb->s_bdev, i++,
- f2fs_readonly(si->sbi->sb) ? "RO": "RW");
+ f2fs_readonly(si->sbi->sb) ? "RO": "RW",
+ f2fs_cp_error(si->sbi) ? "Error": "Good");
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
@@ -349,10 +361,11 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d), "
+ seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
"Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
si->nr_wb_cp_data, si->nr_wb_data,
si->nr_flushing, si->nr_flushed,
+ si->flush_list_empty,
si->nr_discarding, si->nr_discarded,
si->nr_discard_cmd, si->undiscard_blks);
seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d), "
@@ -365,6 +378,8 @@ static int stat_show(struct seq_file *s, void *v)
si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
seq_printf(s, " - datas: %4d in files:%4d\n",
si->ndirty_data, si->ndirty_files);
+ seq_printf(s, " - quota datas: %4d in quota files:%4d\n",
+ si->ndirty_qdata, si->nquota_files);
seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
seq_printf(s, " - imeta: %4d\n",
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 4f2a8fedb313..1955707b138b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -10,10 +10,12 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
+#include <linux/sched.h>
#include "f2fs.h"
#include "node.h"
#include "acl.h"
#include "xattr.h"
+#include <trace/events/f2fs.h>
static unsigned long dir_blocks(struct inode *inode)
{
@@ -847,6 +849,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct f2fs_dentry_block *dentry_blk = NULL;
struct page *dentry_page = NULL;
struct file_ra_state *ra = &file->f_ra;
+ loff_t start_pos = ctx->pos;
unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
struct f2fs_dentry_ptr d;
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
@@ -855,24 +858,32 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
if (f2fs_encrypted_inode(inode)) {
err = fscrypt_get_encryption_info(inode);
if (err && err != -ENOKEY)
- return err;
+ goto out;
err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr);
if (err < 0)
- return err;
+ goto out;
}
if (f2fs_has_inline_dentry(inode)) {
err = f2fs_read_inline_dir(file, ctx, &fstr);
- goto out;
+ goto out_free;
}
- /* readahead for multi pages of dir */
- if (npages - n > 1 && !ra_has_index(ra, n))
- page_cache_sync_readahead(inode->i_mapping, ra, file, n,
+ for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) {
+
+ /* allow readdir() to be interrupted */
+ if (fatal_signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto out_free;
+ }
+ cond_resched();
+
+ /* readahead for multi pages of dir */
+ if (npages - n > 1 && !ra_has_index(ra, n))
+ page_cache_sync_readahead(inode->i_mapping, ra, file, n,
min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
- for (; n < npages; n++) {
dentry_page = get_lock_data_page(inode, n, false);
if (IS_ERR(dentry_page)) {
err = PTR_ERR(dentry_page);
@@ -880,7 +891,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
err = 0;
continue;
} else {
- goto out;
+ goto out_free;
}
}
@@ -896,12 +907,13 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
break;
}
- ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK;
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
}
-out:
+out_free:
fscrypt_fname_free_buffer(&fstr);
+out:
+ trace_f2fs_readdir(inode, start_pos, ctx->pos, err);
return err < 0 ? err : 0;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c1a0aef8efc6..081ec493baae 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -47,6 +47,8 @@
enum {
FAULT_KMALLOC,
FAULT_PAGE_ALLOC,
+ FAULT_PAGE_GET,
+ FAULT_ALLOC_BIO,
FAULT_ALLOC_NID,
FAULT_ORPHAN,
FAULT_BLOCK,
@@ -94,6 +96,7 @@ extern char *fault_name[FAULT_MAX];
#define F2FS_MOUNT_GRPQUOTA 0x00100000
#define F2FS_MOUNT_PRJQUOTA 0x00200000
#define F2FS_MOUNT_QUOTA 0x00400000
+#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -119,6 +122,8 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_EXTRA_ATTR 0x0008
#define F2FS_FEATURE_PRJQUOTA 0x0010
#define F2FS_FEATURE_INODE_CHKSUM 0x0020
+#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040
+#define F2FS_FEATURE_QUOTA_INO 0x0080
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -214,7 +219,7 @@ enum {
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
-#define DISCARD_ISSUE_RATE 8
+#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */
#define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
#define DEF_CP_INTERVAL 60 /* 60 secs */
@@ -225,7 +230,6 @@ struct cp_control {
__u64 trim_start;
__u64 trim_end;
__u64 trim_minlen;
- __u64 trimmed;
};
/*
@@ -244,12 +248,14 @@ enum {
ORPHAN_INO, /* for orphan ino list */
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
+ FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
struct ino_entry {
- struct list_head list; /* list head */
- nid_t ino; /* inode number */
+ struct list_head list; /* list head */
+ nid_t ino; /* inode number */
+ unsigned int dirty_device; /* dirty device bitmap */
};
/* for the list of inodes to be GCed */
@@ -273,10 +279,6 @@ struct discard_entry {
#define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \
(MAX_PLIST_NUM - 1) : (blk_num - 1))
-#define P_ACTIVE 0x01
-#define P_TRIM 0x02
-#define plist_issue(tag) (((tag) & P_ACTIVE) || ((tag) & P_TRIM))
-
enum {
D_PREP,
D_SUBMIT,
@@ -308,12 +310,32 @@ struct discard_cmd {
int error; /* bio error */
};
+enum {
+ DPOLICY_BG,
+ DPOLICY_FORCE,
+ DPOLICY_FSTRIM,
+ DPOLICY_UMOUNT,
+ MAX_DPOLICY,
+};
+
+struct discard_policy {
+ int type; /* type of discard */
+ unsigned int min_interval; /* used for candidates exist */
+ unsigned int max_interval; /* used for candidates not exist */
+ unsigned int max_requests; /* # of discards issued per round */
+ unsigned int io_aware_gran; /* minimum granularity discard not be aware of I/O */
+ bool io_aware; /* issue discard in idle time */
+ bool sync; /* submit discard with REQ_SYNC flag */
+ unsigned int granularity; /* discard granularity */
+};
+
struct discard_cmd_control {
struct task_struct *f2fs_issue_discard; /* discard thread */
struct list_head entry_list; /* 4KB discard entry list */
struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
unsigned char pend_list_tag[MAX_PLIST_NUM];/* tag for pending entries */
struct list_head wait_list; /* store on-flushing entries */
+ struct list_head fstrim_list; /* in-flight discard from fstrim */
wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
unsigned int discard_wake; /* to wake up discard thread */
struct mutex cmd_lock;
@@ -443,11 +465,14 @@ struct f2fs_flush_device {
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE 1
+#define DEF_MIN_INLINE_SIZE 1
static inline int get_extra_isize(struct inode *inode);
-#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
- (CUR_ADDRS_PER_INODE(inode) - \
- DEF_INLINE_RESERVED_SIZE - \
- F2FS_INLINE_XATTR_ADDRS))
+static inline int get_inline_xattr_addrs(struct inode *inode);
+#define F2FS_INLINE_XATTR_ADDRS(inode) get_inline_xattr_addrs(inode)
+#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
+ (CUR_ADDRS_PER_INODE(inode) - \
+ F2FS_INLINE_XATTR_ADDRS(inode) - \
+ DEF_INLINE_RESERVED_SIZE))
/* for inline dir */
#define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
@@ -647,6 +672,7 @@ struct f2fs_inode_info {
#endif
struct list_head dirty_list; /* dirty list for dirs and files */
struct list_head gdirty_list; /* linked in global dirty list */
+ struct list_head inmem_ilist; /* list for inmem inodes */
struct list_head inmem_pages; /* inmemory pages managed by f2fs */
struct task_struct *inmem_task; /* store inmemory task */
struct mutex inmem_lock; /* lock for inmemory pages */
@@ -657,6 +683,7 @@ struct f2fs_inode_info {
int i_extra_isize; /* size of extra space located in i_addr */
kprojid_t i_projid; /* id for project quota */
+ int i_inline_xattr_size; /* inline xattr size */
};
static inline void get_extent_info(struct extent_info *ext,
@@ -730,10 +757,13 @@ static inline void __try_update_largest_extent(struct inode *inode,
}
}
-enum nid_list {
- FREE_NID_LIST,
- ALLOC_NID_LIST,
- MAX_NID_LIST,
+/*
+ * For free nid management
+ */
+enum nid_state {
+ FREE_NID, /* newly added to free nid list */
+ PREALLOC_NID, /* it is preallocated */
+ MAX_NID_STATE,
};
struct f2fs_nm_info {
@@ -756,8 +786,8 @@ struct f2fs_nm_info {
/* free node ids management */
struct radix_tree_root free_nid_root;/* root of the free_nid cache */
- struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
- unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */
+ struct list_head free_nid_list; /* list for free nids excluding preallocated nids */
+ unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */
spinlock_t nid_list_lock; /* protect nid lists ops */
struct mutex build_lock; /* lock for build free nids */
unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
@@ -835,6 +865,7 @@ enum {
struct flush_cmd {
struct completion wait;
struct llist_node llnode;
+ nid_t ino;
int ret;
};
@@ -853,6 +884,8 @@ struct f2fs_sm_info {
struct dirty_seglist_info *dirty_info; /* dirty segment information */
struct curseg_info *curseg_array; /* active segment information */
+ struct rw_semaphore curseg_lock; /* for preventing curseg change */
+
block_t seg0_blkaddr; /* block address of 0'th segment */
block_t main_blkaddr; /* start block address of main area */
block_t ssa_blkaddr; /* start block address of SSA area */
@@ -874,6 +907,7 @@ struct f2fs_sm_info {
unsigned int min_ipu_util; /* in-place-update threshold */
unsigned int min_fsync_blocks; /* threshold for fsync */
unsigned int min_hot_blocks; /* threshold for hot block allocation */
+ unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */
/* for flush command control */
struct flush_cmd_control *fcc_info;
@@ -895,6 +929,7 @@ struct f2fs_sm_info {
enum count_type {
F2FS_DIRTY_DENTS,
F2FS_DIRTY_DATA,
+ F2FS_DIRTY_QDATA,
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
F2FS_INMEM_PAGES,
@@ -943,6 +978,18 @@ enum need_lock_type {
LOCK_RETRY,
};
+enum cp_reason_type {
+ CP_NO_NEEDED,
+ CP_NON_REGULAR,
+ CP_HARDLINK,
+ CP_SB_NEED_CP,
+ CP_WRONG_PINO,
+ CP_NO_SPC_ROLL,
+ CP_NODE_NEED_CP,
+ CP_FASTBOOT_MODE,
+ CP_SPEC_LOG_NUM,
+};
+
enum iostat_type {
APP_DIRECT_IO, /* app direct IOs */
APP_BUFFERED_IO, /* app buffered IOs */
@@ -962,6 +1009,7 @@ enum iostat_type {
struct f2fs_io_info {
struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
+ nid_t ino; /* inode number */
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
enum temp_type temp; /* contains HOT/WARM/COLD */
int op; /* contains REQ_OP_ */
@@ -1006,6 +1054,7 @@ enum inode_type {
DIR_INODE, /* for dirty dir inode */
FILE_INODE, /* for dirty regular/symlink inode */
DIRTY_META, /* for all dirtied inode metadata */
+ ATOMIC_FILE, /* for all atomic files */
NR_INODE_TYPE,
};
@@ -1108,12 +1157,15 @@ struct f2fs_sb_info {
loff_t max_file_blocks; /* max block index of file */
int active_logs; /* # of active logs */
int dir_level; /* directory level */
+ int inline_xattr_size; /* inline xattr size */
+ unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */
block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */
block_t discard_blks; /* discard command candidats */
block_t last_valid_block_count; /* for recovery */
block_t reserved_blocks; /* configurable reserved blocks */
+ block_t current_reserved_blocks; /* current reserved blocks */
u32 s_next_generation; /* for NFS support */
@@ -1179,6 +1231,8 @@ struct f2fs_sb_info {
struct list_head s_list;
int s_ndevs; /* number of devices */
struct f2fs_dev_info *devs; /* for device list */
+ unsigned int dirty_device; /* for checkpoint data flush */
+ spinlock_t dev_lock; /* protect dirty_device */
struct mutex umount_mutex;
unsigned int shrinker_run_no;
@@ -1242,8 +1296,7 @@ static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
- struct timespec ts = {sbi->interval_time[type], 0};
- unsigned long interval = timespec_to_jiffies(&ts);
+ unsigned long interval = sbi->interval_time[type] * HZ;
return time_after(jiffies, sbi->last_time[type] + interval);
}
@@ -1410,6 +1463,13 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
return le64_to_cpu(cp->checkpoint_ver);
}
+static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
+{
+ if (type < F2FS_MAX_QUOTAS)
+ return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
+ return 0;
+}
+
static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
size_t crc_offset = le32_to_cpu(cp->checksum_offset);
@@ -1588,7 +1648,8 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
spin_lock(&sbi->stat_lock);
sbi->total_valid_block_count += (block_t)(*count);
- avail_user_block_count = sbi->user_block_count - sbi->reserved_blocks;
+ avail_user_block_count = sbi->user_block_count -
+ sbi->current_reserved_blocks;
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
diff = sbi->total_valid_block_count - avail_user_block_count;
*count -= diff;
@@ -1622,6 +1683,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
f2fs_bug_on(sbi, inode->i_blocks < sectors);
sbi->total_valid_block_count -= (block_t)count;
+ if (sbi->reserved_blocks &&
+ sbi->current_reserved_blocks < sbi->reserved_blocks)
+ sbi->current_reserved_blocks = min(sbi->reserved_blocks,
+ sbi->current_reserved_blocks + count);
spin_unlock(&sbi->stat_lock);
f2fs_i_blocks_write(inode, count, false, true);
}
@@ -1642,6 +1707,8 @@ static inline void inode_inc_dirty_pages(struct inode *inode)
atomic_inc(&F2FS_I(inode)->dirty_pages);
inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
+ if (IS_NOQUOTA(inode))
+ inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
@@ -1658,6 +1725,8 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
atomic_dec(&F2FS_I(inode)->dirty_pages);
dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
+ if (IS_NOQUOTA(inode))
+ dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}
static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
@@ -1765,10 +1834,17 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
return ret;
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_BLOCK)) {
+ f2fs_show_injection_info(FAULT_BLOCK);
+ goto enospc;
+ }
+#endif
+
spin_lock(&sbi->stat_lock);
valid_block_count = sbi->total_valid_block_count + 1;
- if (unlikely(valid_block_count + sbi->reserved_blocks >
+ if (unlikely(valid_block_count + sbi->current_reserved_blocks >
sbi->user_block_count)) {
spin_unlock(&sbi->stat_lock);
goto enospc;
@@ -1811,6 +1887,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
sbi->total_valid_node_count--;
sbi->total_valid_block_count--;
+ if (sbi->reserved_blocks &&
+ sbi->current_reserved_blocks < sbi->reserved_blocks)
+ sbi->current_reserved_blocks++;
spin_unlock(&sbi->stat_lock);
@@ -1857,6 +1936,19 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
}
+static inline struct page *f2fs_pagecache_get_page(
+ struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp_mask)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
+ f2fs_show_injection_info(FAULT_PAGE_GET);
+ return NULL;
+ }
+#endif
+ return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
+}
+
static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
char *src_kaddr = kmap(src);
@@ -1906,15 +1998,25 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
return entry;
}
-static inline struct bio *f2fs_bio_alloc(int npages)
+static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
+ int npages, bool no_fail)
{
struct bio *bio;
- /* No failure on bio allocation */
- bio = bio_alloc(GFP_NOIO, npages);
- if (!bio)
- bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
- return bio;
+ if (no_fail) {
+ /* No failure on bio allocation */
+ bio = bio_alloc(GFP_NOIO, npages);
+ if (!bio)
+ bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+ return bio;
+ }
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
+ f2fs_show_injection_info(FAULT_ALLOC_BIO);
+ return NULL;
+ }
+#endif
+ return bio_alloc(GFP_KERNEL, npages);
}
static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
@@ -2224,25 +2326,20 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
static inline unsigned int addrs_per_inode(struct inode *inode)
{
- if (f2fs_has_inline_xattr(inode))
- return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS;
- return CUR_ADDRS_PER_INODE(inode);
+ return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS(inode);
}
-static inline void *inline_xattr_addr(struct page *page)
+static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
struct f2fs_inode *ri = F2FS_INODE(page);
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
- F2FS_INLINE_XATTR_ADDRS]);
+ F2FS_INLINE_XATTR_ADDRS(inode)]);
}
static inline int inline_xattr_size(struct inode *inode)
{
- if (f2fs_has_inline_xattr(inode))
- return F2FS_INLINE_XATTR_ADDRS << 2;
- else
- return 0;
+ return get_inline_xattr_addrs(inode) * sizeof(__le32);
}
static inline int f2fs_has_inline_data(struct inode *inode)
@@ -2323,9 +2420,10 @@ static inline void clear_file(struct inode *inode, int type)
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
+ bool ret;
+
if (dsync) {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- bool ret;
spin_lock(&sbi->inode_lock[DIRTY_META]);
ret = list_empty(&F2FS_I(inode)->gdirty_list);
@@ -2336,9 +2434,15 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
file_keep_isize(inode) ||
i_size_read(inode) & PAGE_MASK)
return false;
- return F2FS_I(inode)->last_disk_size == i_size_read(inode);
+
+ down_read(&F2FS_I(inode)->i_sem);
+ ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
+ up_read(&F2FS_I(inode)->i_sem);
+
+ return ret;
}
+#define sb_rdonly f2fs_readonly
static inline int f2fs_readonly(struct super_block *sb)
{
return sb->s_flags & MS_RDONLY;
@@ -2406,6 +2510,12 @@ static inline int get_extra_isize(struct inode *inode)
return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}
+static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb);
+static inline int get_inline_xattr_addrs(struct inode *inode)
+{
+ return F2FS_I(inode)->i_inline_xattr_size;
+}
+
#define get_inode_mode(i) \
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -2534,7 +2644,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
*/
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
-void f2fs_enable_quota_files(struct f2fs_sb_info *sbi);
+int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
@@ -2562,7 +2672,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni);
pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int truncate_inode_blocks(struct inode *inode, pgoff_t from);
-int truncate_xattr_node(struct inode *inode, struct page *page);
+int truncate_xattr_node(struct inode *inode);
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
int remove_inode_page(struct inode *inode);
struct page *new_inode_page(struct inode *inode);
@@ -2597,19 +2707,22 @@ void destroy_node_manager_caches(void);
*/
bool need_SSR(struct f2fs_sb_info *sbi);
void register_inmem_page(struct inode *inode, struct page *page);
+void drop_inmem_pages_all(struct f2fs_sb_info *sbi);
void drop_inmem_pages(struct inode *inode);
void drop_inmem_page(struct inode *inode, struct page *page);
int commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi);
-int f2fs_issue_flush(struct f2fs_sb_info *sbi);
+int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int create_flush_cmd_control(struct f2fs_sb_info *sbi);
+int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
-void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
+void init_discard_policy(struct discard_policy *dpolicy, int discard_type,
+ unsigned int granularity);
void stop_discard_thread(struct f2fs_sb_info *sbi);
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
+bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void release_discard_addrs(struct f2fs_sb_info *sbi);
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
@@ -2664,6 +2777,10 @@ void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
+void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+ unsigned int devidx, int type);
+bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+ unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int acquire_orphan_inode(struct f2fs_sb_info *sbi);
void release_orphan_inode(struct f2fs_sb_info *sbi);
@@ -2751,14 +2868,16 @@ struct f2fs_stat_info {
unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext;
int ext_tree, zombie_tree, ext_node;
- int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+ int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
+ int ndirty_data, ndirty_qdata;
int inmem_pages;
- unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+ unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
int nats, dirty_nats, sits, dirty_sits;
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
int bg_gc, nr_wb_cp_data, nr_wb_data;
- int nr_flushing, nr_flushed, nr_discarding, nr_discarded;
+ int nr_flushing, nr_flushed, flush_list_empty;
+ int nr_discarding, nr_discarded;
int nr_discard_cmd;
unsigned int undiscard_blks;
int inline_xattr, inline_inode, inline_dir, append, update, orphans;
@@ -3066,6 +3185,16 @@ static inline int f2fs_sb_has_inode_chksum(struct super_block *sb)
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM);
}
+static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
+}
+
+static inline int f2fs_sb_has_quota_ino(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO);
+}
+
#ifdef CONFIG_BLK_DEV_ZONED
static inline int get_blkz_type(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkaddr)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index a9e1655a6bf8..bfff53f658e1 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -56,6 +56,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
struct dnode_of_data dn;
int err;
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto err;
+ }
+
sb_start_pagefault(inode->i_sb);
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -117,6 +122,7 @@ out_sem:
out:
sb_end_pagefault(inode->i_sb);
f2fs_update_time(sbi, REQ_TIME);
+err:
return block_page_mkwrite_return(err);
}
@@ -141,27 +147,29 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
return 1;
}
-static inline bool need_do_checkpoint(struct inode *inode)
+static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- bool need_cp = false;
+ enum cp_reason_type cp_reason = CP_NO_NEEDED;
- if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
- need_cp = true;
+ if (!S_ISREG(inode->i_mode))
+ cp_reason = CP_NON_REGULAR;
+ else if (inode->i_nlink != 1)
+ cp_reason = CP_HARDLINK;
else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
- need_cp = true;
+ cp_reason = CP_SB_NEED_CP;
else if (file_wrong_pino(inode))
- need_cp = true;
+ cp_reason = CP_WRONG_PINO;
else if (!space_for_roll_forward(sbi))
- need_cp = true;
+ cp_reason = CP_NO_SPC_ROLL;
else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
- need_cp = true;
+ cp_reason = CP_NODE_NEED_CP;
else if (test_opt(sbi, FASTBOOT))
- need_cp = true;
+ cp_reason = CP_FASTBOOT_MODE;
else if (sbi->active_logs == 2)
- need_cp = true;
+ cp_reason = CP_SPEC_LOG_NUM;
- return need_cp;
+ return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
@@ -196,7 +204,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t ino = inode->i_ino;
int ret = 0;
- bool need_cp = false;
+ enum cp_reason_type cp_reason = 0;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
@@ -215,7 +223,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
clear_inode_flag(inode, FI_NEED_IPU);
if (ret) {
- trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
+ trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
return ret;
}
@@ -246,10 +254,10 @@ go_write:
* sudden-power-off.
*/
down_read(&F2FS_I(inode)->i_sem);
- need_cp = need_do_checkpoint(inode);
+ cp_reason = need_do_checkpoint(inode);
up_read(&F2FS_I(inode)->i_sem);
- if (need_cp) {
+ if (cp_reason) {
/* all the dirty node pages should be flushed for POR */
ret = f2fs_sync_fs(inode->i_sb, 1);
@@ -297,19 +305,24 @@ sync_nodes:
remove_ino_entry(sbi, ino, APPEND_INO);
clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- remove_ino_entry(sbi, ino, UPDATE_INO);
- clear_inode_flag(inode, FI_UPDATE_WRITE);
if (!atomic)
- ret = f2fs_issue_flush(sbi);
+ ret = f2fs_issue_flush(sbi, inode->i_ino);
+ if (!ret) {
+ remove_ino_entry(sbi, ino, UPDATE_INO);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
+ remove_ino_entry(sbi, ino, FLUSH_INO);
+ }
f2fs_update_time(sbi, REQ_TIME);
out:
- trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
+ trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
f2fs_trace_ios(NULL, 1);
return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
+ return -EIO;
return f2fs_do_sync_file(file, start, end, datasync, false);
}
@@ -446,6 +459,9 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
struct inode *inode = file_inode(file);
int err;
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ return -EIO;
+
/* we don't need to use inline_data strictly */
err = f2fs_convert_inline_inode(inode);
if (err)
@@ -632,6 +648,9 @@ int f2fs_truncate(struct inode *inode)
{
int err;
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ return -EIO;
+
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return 0;
@@ -667,7 +686,8 @@ int f2fs_getattr(struct vfsmount *mnt,
generic_fillattr(inode, stat);
/* we need to show initial sectors used for inline_data/dentries */
- if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
+ if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
+ f2fs_has_inline_dentry(inode))
stat->blocks += (stat->size + 511) >> 9;
return 0;
@@ -709,6 +729,9 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
int err;
bool size_changed = false;
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ return -EIO;
+
err = inode_change_ok(inode, attr);
if (err)
return err;
@@ -761,6 +784,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_mtime = inode->i_ctime = current_time(inode);
}
+ down_write(&F2FS_I(inode)->i_sem);
+ F2FS_I(inode)->last_disk_size = i_size_read(inode);
+ up_write(&F2FS_I(inode)->i_sem);
+
size_changed = true;
}
@@ -834,7 +861,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
if (err) {
if (err == -ENOENT) {
- pg_start++;
+ pg_start = get_next_page_offset(&dn, pg_start);
continue;
}
return err;
@@ -1149,11 +1176,14 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (ret)
goto out;
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
truncate_pagecache(inode, offset);
ret = f2fs_do_collapse(inode, pg_start, pg_end);
if (ret)
- goto out;
+ goto out_unlock;
/* write out all moved pages, if possible */
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -1165,7 +1195,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = truncate_blocks(inode, new_size, true);
if (!ret)
f2fs_i_size_write(inode, new_size);
-
+out_unlock:
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
out:
up_write(&F2FS_I(inode)->i_mmap_sem);
return ret;
@@ -1348,6 +1379,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (ret)
goto out;
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
truncate_pagecache(inode, offset);
pg_start = offset >> PAGE_SHIFT;
@@ -1375,6 +1409,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (!ret)
f2fs_i_size_write(inode, new_size);
+
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
out:
up_write(&F2FS_I(inode)->i_mmap_sem);
return ret;
@@ -1424,8 +1460,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
}
- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
- f2fs_i_size_write(inode, new_size);
+ if (new_size > i_size_read(inode)) {
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ file_set_keep_isize(inode);
+ else
+ f2fs_i_size_write(inode, new_size);
+ }
return err;
}
@@ -1436,6 +1476,9 @@ static long f2fs_fallocate(struct file *file, int mode,
struct inode *inode = file_inode(file);
long ret = 0;
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ return -EIO;
+
/* f2fs only support ->fallocate for regular file */
if (!S_ISREG(inode->i_mode))
return -EINVAL;
@@ -1469,8 +1512,6 @@ static long f2fs_fallocate(struct file *file, int mode,
if (!ret) {
inode->i_mtime = inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, false);
- if (mode & FALLOC_FL_KEEP_SIZE)
- file_set_keep_isize(inode);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -1864,6 +1905,9 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
+ if (!f2fs_sb_has_crypto(inode->i_sb))
+ return -EOPNOTSUPP;
+
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
@@ -1871,6 +1915,8 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
+ if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb))
+ return -EOPNOTSUPP;
return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
@@ -2226,9 +2272,13 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
}
inode_lock(src);
+ down_write(&F2FS_I(src)->dio_rwsem[WRITE]);
if (src != dst) {
- if (!inode_trylock(dst)) {
- ret = -EBUSY;
+ ret = -EBUSY;
+ if (!inode_trylock(dst))
+ goto out;
+ if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) {
+ inode_unlock(dst);
goto out;
}
}
@@ -2288,9 +2338,12 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
}
f2fs_unlock_op(sbi);
out_unlock:
- if (src != dst)
+ if (src != dst) {
+ up_write(&F2FS_I(dst)->dio_rwsem[WRITE]);
inode_unlock(dst);
+ }
out:
+ up_write(&F2FS_I(src)->dio_rwsem[WRITE]);
inode_unlock(src);
return ret;
}
@@ -2412,6 +2465,9 @@ static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
+ return -EIO;
+
switch (cmd) {
case F2FS_IOC_GETFLAGS:
return f2fs_ioc_getflags(filp, arg);
@@ -2465,6 +2521,9 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct blk_plug plug;
ssize_t ret;
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ return -EIO;
+
inode_lock(inode);
ret = generic_write_checks(iocb, from);
if (ret > 0) {
@@ -2475,6 +2534,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
err = f2fs_preallocate_blocks(iocb, from);
if (err) {
+ clear_inode_flag(inode, FI_NO_PREALLOC);
inode_unlock(inode);
return err;
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index bd16e6631cf3..be9fd616736b 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -267,16 +267,6 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
-static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
- unsigned int segno)
-{
- unsigned int valid_blocks =
- get_valid_blocks(sbi, segno, true);
-
- return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
- valid_blocks * 2 : valid_blocks;
-}
-
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
unsigned int segno, struct victim_sel_policy *p)
{
@@ -285,7 +275,7 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
/* alloc_mode == LFS */
if (p->gc_mode == GC_GREEDY)
- return get_greedy_cost(sbi, segno);
+ return get_valid_blocks(sbi, segno, true);
else
return get_cb_cost(sbi, segno);
}
@@ -466,10 +456,10 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
struct seg_entry *sentry;
int ret;
- mutex_lock(&sit_i->sentry_lock);
+ down_read(&sit_i->sentry_lock);
sentry = get_seg_entry(sbi, segno);
ret = f2fs_test_bit(offset, sentry->cur_valid_map);
- mutex_unlock(&sit_i->sentry_lock);
+ up_read(&sit_i->sentry_lock);
return ret;
}
@@ -608,6 +598,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
+ .ino = inode->i_ino,
.type = DATA,
.temp = COLD,
.op = REQ_OP_READ,
@@ -659,8 +650,8 @@ static void move_data_block(struct inode *inode, block_t bidx,
allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
&sum, CURSEG_COLD_DATA, NULL, false);
- fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
- FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+ newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
if (!fio.encrypted_page) {
err = -ENOMEM;
goto recover_block;
@@ -738,6 +729,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
} else {
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
+ .ino = inode->i_ino,
.type = DATA,
.temp = COLD,
.op = REQ_OP_WRITE,
@@ -840,10 +832,17 @@ next_step:
continue;
}
+ if (!down_write_trylock(
+ &F2FS_I(inode)->dio_rwsem[WRITE])) {
+ iput(inode);
+ continue;
+ }
+
start_bidx = start_bidx_of_node(nofs, inode);
data_page = get_read_data_page(inode,
start_bidx + ofs_in_node, REQ_RAHEAD,
true);
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
if (IS_ERR(data_page)) {
iput(inode);
continue;
@@ -901,10 +900,10 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
struct sit_info *sit_i = SIT_I(sbi);
int ret;
- mutex_lock(&sit_i->sentry_lock);
+ down_write(&sit_i->sentry_lock);
ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
NO_CHECK_TYPE, LFS);
- mutex_unlock(&sit_i->sentry_lock);
+ up_write(&sit_i->sentry_lock);
return ret;
}
@@ -952,8 +951,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
/*
* this is to avoid deadlock:
* - lock_page(sum_page) - f2fs_replace_block
- * - check_valid_map() - mutex_lock(sentry_lock)
- * - mutex_lock(sentry_lock) - change_curseg()
+ * - check_valid_map() - down_write(sentry_lock)
+ * - down_read(sentry_lock) - change_curseg()
* - lock_page(sum_page)
*/
if (type == SUM_TYPE_NODE)
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index fbf22b0f667f..91d5d831be72 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -130,6 +130,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
+ .ino = dn->inode->i_ino,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_NOIDLE | REQ_PRIO,
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 50c88e37ed66..9684d53563f1 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -232,6 +232,23 @@ static int do_read_inode(struct inode *inode)
fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
le16_to_cpu(ri->i_extra_isize) : 0;
+ if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+ f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
+ fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
+ } else if (f2fs_has_inline_xattr(inode) ||
+ f2fs_has_inline_dentry(inode)) {
+ fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+ } else {
+
+ /*
+ * Previous inline data or directory always reserved 200 bytes
+ * in inode layout, even if inline_xattr is disabled. In order
+ * to keep inline_dentry's structure for backward compatibility,
+ * we get the space back only from inline_data.
+ */
+ fi->i_inline_xattr_size = 0;
+ }
+
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
@@ -384,6 +401,10 @@ int update_inode(struct inode *inode, struct page *node_page)
if (f2fs_has_extra_attr(inode)) {
ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
+ if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
+ ri->i_inline_xattr_size =
+ cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
+
if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
i_projid)) {
@@ -480,6 +501,7 @@ void f2fs_evict_inode(struct inode *inode)
remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+ remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
sb_start_intwrite(inode->i_sb);
set_inode_flag(inode, FI_NO_ALLOC);
@@ -519,8 +541,10 @@ no_delete:
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
- if (!is_set_ckpt_flags(sbi, CP_ERROR_FLAG))
+ if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
+ else
+ f2fs_inode_synced(inode);
/* ino == 0, if f2fs_new_inode() was failed t*/
if (inode->i_ino)
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index d92b8e9064cb..cf8f4370d256 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -29,6 +29,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
nid_t ino;
struct inode *inode;
bool nid_free = false;
+ int xattr_size = 0;
int err;
inode = new_inode(dir->i_sb);
@@ -86,11 +87,23 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (test_opt(sbi, INLINE_XATTR))
set_inode_flag(inode, FI_INLINE_XATTR);
+
if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
set_inode_flag(inode, FI_INLINE_DATA);
if (f2fs_may_inline_dentry(inode))
set_inode_flag(inode, FI_INLINE_DENTRY);
+ if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+ f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
+ if (f2fs_has_inline_xattr(inode))
+ xattr_size = sbi->inline_xattr_size;
+ /* Otherwise, will be 0 */
+ } else if (f2fs_has_inline_xattr(inode) ||
+ f2fs_has_inline_dentry(inode)) {
+ xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
+ }
+ F2FS_I(inode)->i_inline_xattr_size = xattr_size;
+
f2fs_init_extent_tree(inode, NULL);
stat_inc_inline_xattr(inode);
@@ -177,6 +190,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
nid_t ino = 0;
int err;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
err = dquot_initialize(dir);
if (err)
return err;
@@ -221,6 +237,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
int err;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
if (f2fs_encrypted_inode(dir) &&
!fscrypt_has_permitted_context(dir, inode))
return -EPERM;
@@ -331,12 +350,15 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
struct inode *inode = NULL;
struct f2fs_dir_entry *de;
struct page *page;
- nid_t ino;
+ struct dentry *new;
+ nid_t ino = -1;
int err = 0;
unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
+ trace_f2fs_lookup_start(dir, dentry, flags);
+
if (f2fs_encrypted_inode(dir)) {
- int res = fscrypt_get_encryption_info(dir);
+ err = fscrypt_get_encryption_info(dir);
/*
* DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
@@ -346,18 +368,22 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
if (fscrypt_has_encryption_key(dir))
fscrypt_set_encrypted_dentry(dentry);
fscrypt_set_d_op(dentry);
- if (res && res != -ENOKEY)
- return ERR_PTR(res);
+ if (err && err != -ENOKEY)
+ goto out;
}
- if (dentry->d_name.len > F2FS_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
+ if (dentry->d_name.len > F2FS_NAME_LEN) {
+ err = -ENAMETOOLONG;
+ goto out;
+ }
de = f2fs_find_entry(dir, &dentry->d_name, &page);
if (!de) {
- if (IS_ERR(page))
- return (struct dentry *)page;
- return d_splice_alias(inode, dentry);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
+ }
+ goto out_splice;
}
ino = le32_to_cpu(de->ino);
@@ -365,19 +391,21 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
f2fs_put_page(page, 0);
inode = f2fs_iget(dir->i_sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
err = __recover_dot_dentries(dir, root_ino);
if (err)
- goto err_out;
+ goto out_iput;
}
if (f2fs_has_inline_dots(inode)) {
err = __recover_dot_dentries(inode, dir->i_ino);
if (err)
- goto err_out;
+ goto out_iput;
}
if (f2fs_encrypted_inode(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
@@ -386,12 +414,18 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
"Inconsistent encryption contexts: %lu/%lu",
dir->i_ino, inode->i_ino);
err = -EPERM;
- goto err_out;
+ goto out_iput;
}
- return d_splice_alias(inode, dentry);
-
-err_out:
+out_splice:
+ new = d_splice_alias(inode, dentry);
+ if (IS_ERR(new))
+ err = PTR_ERR(new);
+ trace_f2fs_lookup_end(dir, dentry, ino, err);
+ return new;
+out_iput:
iput(inode);
+out:
+ trace_f2fs_lookup_end(dir, dentry, ino, err);
return ERR_PTR(err);
}
@@ -405,9 +439,15 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
trace_f2fs_unlink_enter(dir, dentry);
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
err = dquot_initialize(dir);
if (err)
return err;
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
de = f2fs_find_entry(dir, &dentry->d_name, &page);
if (!de) {
@@ -457,6 +497,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct fscrypt_symlink_data *sd = NULL;
int err;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
if (f2fs_encrypted_inode(dir)) {
err = fscrypt_get_encryption_info(dir);
if (err)
@@ -563,6 +606,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct inode *inode;
int err;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
err = dquot_initialize(dir);
if (err)
return err;
@@ -615,6 +661,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err = 0;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
err = dquot_initialize(dir);
if (err)
return err;
@@ -709,6 +758,9 @@ out:
static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(dir))))
+ return -EIO;
+
if (f2fs_encrypted_inode(dir)) {
int err = fscrypt_get_encryption_info(dir);
if (err)
@@ -720,6 +772,9 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
static int f2fs_create_whiteout(struct inode *dir, struct inode **whiteout)
{
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(dir))))
+ return -EIO;
+
return __f2fs_tmpfile(dir, NULL, S_IFCHR | WHITEOUT_MODE, whiteout);
}
@@ -739,6 +794,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
bool is_old_inline = f2fs_has_inline_dentry(old_dir);
int err = -ENOENT;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
if ((f2fs_encrypted_inode(old_dir) &&
!fscrypt_has_encryption_key(old_dir)) ||
(f2fs_encrypted_inode(new_dir) &&
@@ -764,6 +822,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto out;
+ if (new_inode) {
+ err = dquot_initialize(new_inode);
+ if (err)
+ goto out;
+ }
+
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_entry) {
if (IS_ERR(old_page))
@@ -932,6 +996,9 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
int old_nlink = 0, new_nlink = 0;
int err = -ENOENT;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
if ((f2fs_encrypted_inode(old_dir) &&
!fscrypt_has_encryption_key(old_dir)) ||
(f2fs_encrypted_inode(new_dir) &&
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 32474db18ad9..964c99655942 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -46,7 +46,7 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
* give 25%, 25%, 50%, 50%, 50% memory for each components respectively
*/
if (type == FREE_NIDS) {
- mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+ mem_size = (nm_i->nid_cnt[FREE_NID] *
sizeof(struct free_nid)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == NAT_ENTRIES) {
@@ -63,7 +63,7 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
} else if (type == INO_ENTRIES) {
int i;
- for (i = 0; i <= UPDATE_INO; i++)
+ for (i = 0; i < MAX_INO_ENTRY; i++)
mem_size += sbi->im[i].ino_num *
sizeof(struct ino_entry);
mem_size >>= PAGE_SHIFT;
@@ -74,6 +74,10 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
atomic_read(&sbi->total_ext_node) *
sizeof(struct extent_node)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ } else if (type == INMEM_PAGES) {
+ /* it allows 20% / total_ram for inmemory pages */
+ mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
+ res = mem_size < (val.totalram / 5);
} else {
if (!sbi->sb->s_bdi->wb.dirty_exceeded)
return true;
@@ -134,6 +138,44 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
return dst_page;
}
+static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
+{
+ struct nat_entry *new;
+
+ if (no_fail)
+ new = f2fs_kmem_cache_alloc(nat_entry_slab,
+ GFP_NOFS | __GFP_ZERO);
+ else
+ new = kmem_cache_alloc(nat_entry_slab,
+ GFP_NOFS | __GFP_ZERO);
+ if (new) {
+ nat_set_nid(new, nid);
+ nat_reset_flag(new);
+ }
+ return new;
+}
+
+static void __free_nat_entry(struct nat_entry *e)
+{
+ kmem_cache_free(nat_entry_slab, e);
+}
+
+/* must be locked by nat_tree_lock */
+static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
+ struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
+{
+ if (no_fail)
+ f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
+ else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
+ return NULL;
+
+ if (raw_ne)
+ node_info_from_raw_nat(&ne->ni, raw_ne);
+ list_add_tail(&ne->list, &nm_i->nat_entries);
+ nm_i->nat_cnt++;
+ return ne;
+}
+
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
return radix_tree_lookup(&nm_i->nat_root, n);
@@ -150,7 +192,7 @@ static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
list_del(&e->list);
radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
nm_i->nat_cnt--;
- kmem_cache_free(nat_entry_slab, e);
+ __free_nat_entry(e);
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
@@ -246,49 +288,29 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
return need_update;
}
-static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
- bool no_fail)
-{
- struct nat_entry *new;
-
- if (no_fail) {
- new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
- f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
- } else {
- new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
- if (!new)
- return NULL;
- if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
- kmem_cache_free(nat_entry_slab, new);
- return NULL;
- }
- }
-
- memset(new, 0, sizeof(struct nat_entry));
- nat_set_nid(new, nid);
- nat_reset_flag(new);
- list_add_tail(&new->list, &nm_i->nat_entries);
- nm_i->nat_cnt++;
- return new;
-}
-
+/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct f2fs_nat_entry *ne)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct nat_entry *e;
+ struct nat_entry *new, *e;
+ new = __alloc_nat_entry(nid, false);
+ if (!new)
+ return;
+
+ down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
- if (!e) {
- e = grab_nat_entry(nm_i, nid, false);
- if (e)
- node_info_from_raw_nat(&e->ni, ne);
- } else {
+ if (!e)
+ e = __init_nat_entry(nm_i, new, ne, false);
+ else
f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
nat_get_blkaddr(e) !=
le32_to_cpu(ne->block_addr) ||
nat_get_version(e) != ne->version);
- }
+ up_write(&nm_i->nat_tree_lock);
+ if (e != new)
+ __free_nat_entry(new);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
@@ -296,11 +318,12 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
+ struct nat_entry *new = __alloc_nat_entry(ni->nid, true);
down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
- e = grab_nat_entry(nm_i, ni->nid, true);
+ e = __init_nat_entry(nm_i, new, NULL, true);
copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
@@ -312,6 +335,9 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
}
+ /* let's free early to reduce memory consumption */
+ if (e != new)
+ __free_nat_entry(new);
/* sanity check */
f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
@@ -327,10 +353,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
unsigned char version = nat_get_version(e);
nat_set_version(e, inc_node_version(version));
-
- /* in order to reuse the nid */
- if (nm_i->next_scan_nid > ni->nid)
- nm_i->next_scan_nid = ni->nid;
}
/* change address */
@@ -424,9 +446,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
f2fs_put_page(page, 1);
cache:
/* cache nat entry */
- down_write(&nm_i->nat_tree_lock);
cache_nat_entry(sbi, nid, &ne);
- up_write(&nm_i->nat_tree_lock);
}
/*
@@ -962,7 +982,8 @@ fail:
return err > 0 ? 0 : err;
}
-int truncate_xattr_node(struct inode *inode, struct page *page)
+/* caller must lock inode page */
+int truncate_xattr_node(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t nid = F2FS_I(inode)->i_xattr_nid;
@@ -978,10 +999,7 @@ int truncate_xattr_node(struct inode *inode, struct page *page)
f2fs_i_xnid_write(inode, 0);
- set_new_dnode(&dn, inode, page, npage, nid);
-
- if (page)
- dn.inode_page_locked = true;
+ set_new_dnode(&dn, inode, NULL, npage, nid);
truncate_node(&dn);
return 0;
}
@@ -1000,7 +1018,7 @@ int remove_inode_page(struct inode *inode)
if (err)
return err;
- err = truncate_xattr_node(inode, dn.inode_page);
+ err = truncate_xattr_node(inode);
if (err) {
f2fs_put_dnode(&dn);
return err;
@@ -1220,7 +1238,8 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
if (!inode)
return;
- page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
+ page = f2fs_pagecache_get_page(inode->i_mapping, 0,
+ FGP_LOCK|FGP_NOWAIT, 0);
if (!page)
goto iput_out;
@@ -1244,37 +1263,6 @@ iput_out:
iput(inode);
}
-void move_node_page(struct page *node_page, int gc_type)
-{
- if (gc_type == FG_GC) {
- struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 1,
- .for_reclaim = 0,
- };
-
- set_page_dirty(node_page);
- f2fs_wait_on_page_writeback(node_page, NODE, true);
-
- f2fs_bug_on(sbi, PageWriteback(node_page));
- if (!clear_page_dirty_for_io(node_page))
- goto out_page;
-
- if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
- unlock_page(node_page);
- goto release_page;
- } else {
- /* set page dirty and write it */
- if (!PageWriteback(node_page))
- set_page_dirty(node_page);
- }
-out_page:
- unlock_page(node_page);
-release_page:
- f2fs_put_page(node_page, 0);
-}
-
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
pgoff_t index, end;
@@ -1344,6 +1332,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
+ .ino = ino_of_node(page),
.type = NODE,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
@@ -1416,6 +1405,37 @@ redirty_out:
return AOP_WRITEPAGE_ACTIVATE;
}
+void move_node_page(struct page *node_page, int gc_type)
+{
+ if (gc_type == FG_GC) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 1,
+ .for_reclaim = 0,
+ };
+
+ set_page_dirty(node_page);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
+
+ f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
+ if (!clear_page_dirty_for_io(node_page))
+ goto out_page;
+
+ if (__write_node_page(node_page, false, NULL,
+ &wbc, false, FS_GC_NODE_IO))
+ unlock_page(node_page);
+ goto release_page;
+ } else {
+ /* set page dirty and write it */
+ if (!PageWriteback(node_page))
+ set_page_dirty(node_page);
+ }
+out_page:
+ unlock_page(node_page);
+release_page:
+ f2fs_put_page(node_page, 0);
+}
+
static int f2fs_write_node_page(struct page *page,
struct writeback_control *wbc)
{
@@ -1764,35 +1784,54 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
return radix_tree_lookup(&nm_i->free_nid_root, n);
}
-static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
- struct free_nid *i, enum nid_list list, bool new)
+static int __insert_free_nid(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_state state)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- if (new) {
- int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
- if (err)
- return err;
- }
+ int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+ if (err)
+ return err;
- f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
- i->state != NID_ALLOC);
- nm_i->nid_cnt[list]++;
- list_add_tail(&i->list, &nm_i->nid_list[list]);
+ f2fs_bug_on(sbi, state != i->state);
+ nm_i->nid_cnt[state]++;
+ if (state == FREE_NID)
+ list_add_tail(&i->list, &nm_i->free_nid_list);
return 0;
}
-static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
- struct free_nid *i, enum nid_list list, bool reuse)
+static void __remove_free_nid(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_state state)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ f2fs_bug_on(sbi, state != i->state);
+ nm_i->nid_cnt[state]--;
+ if (state == FREE_NID)
+ list_del(&i->list);
+ radix_tree_delete(&nm_i->free_nid_root, i->nid);
+}
+
+static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
+ enum nid_state org_state, enum nid_state dst_state)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
- i->state != NID_ALLOC);
- nm_i->nid_cnt[list]--;
- list_del(&i->list);
- if (!reuse)
- radix_tree_delete(&nm_i->free_nid_root, i->nid);
+ f2fs_bug_on(sbi, org_state != i->state);
+ i->state = dst_state;
+ nm_i->nid_cnt[org_state]--;
+ nm_i->nid_cnt[dst_state]++;
+
+ switch (dst_state) {
+ case PREALLOC_NID:
+ list_del(&i->list);
+ break;
+ case FREE_NID:
+ list_add_tail(&i->list, &nm_i->free_nid_list);
+ break;
+ default:
+ BUG_ON(1);
+ }
}
/* return if the nid is recognized as free */
@@ -1810,7 +1849,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
i->nid = nid;
- i->state = NID_NEW;
+ i->state = FREE_NID;
if (radix_tree_preload(GFP_NOFS))
goto err;
@@ -1823,7 +1862,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
* - f2fs_create
* - f2fs_new_inode
* - alloc_nid
- * - __insert_nid_to_list(ALLOC_NID_LIST)
+ * - __insert_nid_to_list(PREALLOC_NID)
* - f2fs_balance_fs_bg
* - build_free_nids
* - __build_free_nids
@@ -1836,8 +1875,8 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
* - new_node_page
* - set_node_addr
* - alloc_nid_done
- * - __remove_nid_from_list(ALLOC_NID_LIST)
- * - __insert_nid_to_list(FREE_NID_LIST)
+ * - __remove_nid_from_list(PREALLOC_NID)
+ * - __insert_nid_to_list(FREE_NID)
*/
ne = __lookup_nat_cache(nm_i, nid);
if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
@@ -1846,13 +1885,13 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
e = __lookup_free_nid_list(nm_i, nid);
if (e) {
- if (e->state == NID_NEW)
+ if (e->state == FREE_NID)
ret = true;
goto err_out;
}
}
ret = true;
- err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+ err = __insert_free_nid(sbi, i, FREE_NID);
err_out:
spin_unlock(&nm_i->nid_list_lock);
radix_tree_preload_end();
@@ -1870,8 +1909,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- if (i && i->state == NID_NEW) {
- __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+ if (i && i->state == FREE_NID) {
+ __remove_free_nid(sbi, i, FREE_NID);
need_free = true;
}
spin_unlock(&nm_i->nid_list_lock);
@@ -1890,15 +1929,18 @@ static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
return;
- if (set)
+ if (set) {
+ if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
- else
- __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
-
- if (set)
nm_i->free_nid_count[nat_ofs]++;
- else if (!build)
- nm_i->free_nid_count[nat_ofs]--;
+ } else {
+ if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
+ __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ if (!build)
+ nm_i->free_nid_count[nat_ofs]--;
+ }
}
static void scan_nat_page(struct f2fs_sb_info *sbi,
@@ -1933,12 +1975,32 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
}
}
-static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
+static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_journal *journal = curseg->journal;
+ int i;
+
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ block_t addr;
+ nid_t nid;
+
+ addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+ nid = le32_to_cpu(nid_in_journal(journal, i));
+ if (addr == NULL_ADDR)
+ add_free_nid(sbi, nid, true);
+ else
+ remove_free_nid(sbi, nid);
+ }
+ up_read(&curseg->journal_rwsem);
+}
+
+static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned int i, idx;
+ nid_t nid;
down_read(&nm_i->nat_tree_lock);
@@ -1948,40 +2010,27 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
if (!nm_i->free_nid_count[i])
continue;
for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
- nid_t nid;
-
- if (!test_bit_le(idx, nm_i->free_nid_bitmap[i]))
- continue;
+ idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
+ NAT_ENTRY_PER_BLOCK, idx);
+ if (idx >= NAT_ENTRY_PER_BLOCK)
+ break;
nid = i * NAT_ENTRY_PER_BLOCK + idx;
add_free_nid(sbi, nid, true);
- if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
+ if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
goto out;
}
}
out:
- down_read(&curseg->journal_rwsem);
- for (i = 0; i < nats_in_cursum(journal); i++) {
- block_t addr;
- nid_t nid;
+ scan_curseg_cache(sbi);
- addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
- nid = le32_to_cpu(nid_in_journal(journal, i));
- if (addr == NULL_ADDR)
- add_free_nid(sbi, nid, true);
- else
- remove_free_nid(sbi, nid);
- }
- up_read(&curseg->journal_rwsem);
up_read(&nm_i->nat_tree_lock);
}
static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_journal *journal = curseg->journal;
int i = 0;
nid_t nid = nm_i->next_scan_nid;
@@ -1989,7 +2038,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
nid = 0;
/* Enough entries */
- if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
+ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
return;
if (!sync && !available_free_memory(sbi, FREE_NIDS))
@@ -1999,7 +2048,7 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
/* try to find free nids in free_nid_bitmap */
scan_free_nid_bits(sbi);
- if (nm_i->nid_cnt[FREE_NID_LIST])
+ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
return;
}
@@ -2027,18 +2076,8 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
nm_i->next_scan_nid = nid;
/* find free nids from current sum_pages */
- down_read(&curseg->journal_rwsem);
- for (i = 0; i < nats_in_cursum(journal); i++) {
- block_t addr;
+ scan_curseg_cache(sbi);
- addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
- nid = le32_to_cpu(nid_in_journal(journal, i));
- if (addr == NULL_ADDR)
- add_free_nid(sbi, nid, true);
- else
- remove_free_nid(sbi, nid);
- }
- up_read(&curseg->journal_rwsem);
up_read(&nm_i->nat_tree_lock);
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
@@ -2076,15 +2115,13 @@ retry:
}
/* We should not use stale free nids created by build_free_nids */
- if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
- f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
- i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+ if (nm_i->nid_cnt[FREE_NID] && !on_build_free_nids(nm_i)) {
+ f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
+ i = list_first_entry(&nm_i->free_nid_list,
struct free_nid, list);
*nid = i->nid;
- __remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
- i->state = NID_ALLOC;
- __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
+ __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
nm_i->available_nids--;
update_free_nid_bitmap(sbi, *nid, false, false);
@@ -2110,7 +2147,7 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
f2fs_bug_on(sbi, !i);
- __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+ __remove_free_nid(sbi, i, PREALLOC_NID);
spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
@@ -2133,12 +2170,10 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
f2fs_bug_on(sbi, !i);
if (!available_free_memory(sbi, FREE_NIDS)) {
- __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+ __remove_free_nid(sbi, i, PREALLOC_NID);
need_free = true;
} else {
- __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
- i->state = NID_NEW;
- __insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
+ __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
}
nm_i->available_nids++;
@@ -2157,20 +2192,19 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
struct free_nid *i, *next;
int nr = nr_shrink;
- if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
+ if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
return 0;
if (!mutex_trylock(&nm_i->build_lock))
return 0;
spin_lock(&nm_i->nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
- list) {
+ list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
if (nr_shrink <= 0 ||
- nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
+ nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
break;
- __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+ __remove_free_nid(sbi, i, FREE_NID);
kmem_cache_free(free_nid_slab, i);
nr_shrink--;
}
@@ -2196,8 +2230,8 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
goto update_inode;
}
- dst_addr = inline_xattr_addr(ipage);
- src_addr = inline_xattr_addr(page);
+ dst_addr = inline_xattr_addr(inode, ipage);
+ src_addr = inline_xattr_addr(inode, page);
inline_size = inline_xattr_size(inode);
f2fs_wait_on_page_writeback(ipage, NODE, true);
@@ -2286,6 +2320,12 @@ retry:
dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
if (dst->i_inline & F2FS_EXTRA_ATTR) {
dst->i_extra_isize = src->i_extra_isize;
+
+ if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
+ F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
+ i_inline_xattr_size))
+ dst->i_inline_xattr_size = src->i_inline_xattr_size;
+
if (f2fs_sb_has_project_quota(sbi->sb) &&
F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
i_projid))
@@ -2357,8 +2397,8 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
ne = __lookup_nat_cache(nm_i, nid);
if (!ne) {
- ne = grab_nat_entry(nm_i, nid, true);
- node_info_from_raw_nat(&ne->ni, &raw_ne);
+ ne = __alloc_nat_entry(nid, true);
+ __init_nat_entry(nm_i, ne, &raw_ne, true);
}
/*
@@ -2404,15 +2444,17 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
struct f2fs_nat_block *nat_blk = page_address(page);
int valid = 0;
- int i;
+ int i = 0;
if (!enabled_nat_bits(sbi, NULL))
return;
- for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
- if (start_nid == 0 && i == 0)
- valid++;
- if (nat_blk->entries[i].block_addr)
+ if (nat_index == 0) {
+ valid = 1;
+ i = 1;
+ }
+ for (; i < NAT_ENTRY_PER_BLOCK; i++) {
+ if (nat_blk->entries[i].block_addr != NULL_ADDR)
valid++;
}
if (valid == 0) {
@@ -2607,7 +2649,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
__set_bit_le(i, nm_i->nat_block_bitmap);
nid = i * NAT_ENTRY_PER_BLOCK;
- last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+ last_nid = nid + NAT_ENTRY_PER_BLOCK;
spin_lock(&NM_I(sbi)->nid_list_lock);
for (; nid < last_nid; nid++)
@@ -2642,16 +2684,15 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
/* not used nids: 0, node, meta, (and root counted as valid node) */
nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
F2FS_RESERVED_NODE_NUM;
- nm_i->nid_cnt[FREE_NID_LIST] = 0;
- nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
+ nm_i->nid_cnt[FREE_NID] = 0;
+ nm_i->nid_cnt[PREALLOC_NID] = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
- INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
- INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
+ INIT_LIST_HEAD(&nm_i->free_nid_list);
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
@@ -2743,16 +2784,15 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
/* destroy free nid list */
spin_lock(&nm_i->nid_list_lock);
- list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
- list) {
- __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+ list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
+ __remove_free_nid(sbi, i, FREE_NID);
spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
spin_lock(&nm_i->nid_list_lock);
}
- f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
- f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
- f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+ f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
+ f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index bb53e9955ff2..0ee3e5ff49a3 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -140,6 +140,7 @@ enum mem_type {
DIRTY_DENTS, /* indicates dirty dentry pages */
INO_ENTRIES, /* indicates inode entries */
EXTENT_CACHE, /* indicates extent cache */
+ INMEM_PAGES, /* indicates inmemory pages */
BASE_CHECK, /* check kernel status */
};
@@ -150,18 +151,10 @@ struct nat_entry_set {
unsigned int entry_cnt; /* the # of nat entries in set */
};
-/*
- * For free nid mangement
- */
-enum nid_state {
- NID_NEW, /* newly added to free nid list */
- NID_ALLOC /* it is allocated */
-};
-
struct free_nid {
struct list_head list; /* for free node id list */
nid_t nid; /* node id */
- int state; /* in use or not: NID_NEW or NID_ALLOC */
+ int state; /* in use or not: FREE_NID or PREALLOC_NID */
};
static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
@@ -170,12 +163,11 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct free_nid *fnid;
spin_lock(&nm_i->nid_list_lock);
- if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
+ if (nm_i->nid_cnt[FREE_NID] <= 0) {
spin_unlock(&nm_i->nid_list_lock);
return;
}
- fnid = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
- struct free_nid, list);
+ fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
*nid = fnid->nid;
spin_unlock(&nm_i->nid_list_lock);
}
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 9626758bc762..92c57ace1939 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -594,6 +594,9 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
int ret = 0;
unsigned long s_flags = sbi->sb->s_flags;
bool need_writecp = false;
+#ifdef CONFIG_QUOTA
+ int quota_enabled;
+#endif
if (s_flags & MS_RDONLY) {
f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
@@ -604,7 +607,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
/* Needed for iput() to work correctly and not trash data */
sbi->sb->s_flags |= MS_ACTIVE;
/* Turn on quotas so that they are updated correctly */
- f2fs_enable_quota_files(sbi);
+ quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
#endif
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -665,7 +668,8 @@ skip:
out:
#ifdef CONFIG_QUOTA
/* Turn quotas off */
- f2fs_quota_off_umount(sbi->sb);
+ if (quota_enabled)
+ f2fs_quota_off_umount(sbi->sb);
#endif
sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f5c494389483..94939a5a96c8 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -181,11 +181,12 @@ bool need_SSR(struct f2fs_sb_info *sbi)
return true;
return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
- 2 * reserved_sections(sbi));
+ SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
void register_inmem_page(struct inode *inode, struct page *page)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct inmem_pages *new;
@@ -204,6 +205,10 @@ void register_inmem_page(struct inode *inode, struct page *page)
mutex_lock(&fi->inmem_lock);
get_page(page);
list_add_tail(&new->list, &fi->inmem_pages);
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (list_empty(&fi->inmem_ilist))
+ list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
mutex_unlock(&fi->inmem_lock);
@@ -262,12 +267,41 @@ next:
return err;
}
+void drop_inmem_pages_all(struct f2fs_sb_info *sbi)
+{
+ struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+next:
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (list_empty(head)) {
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+ return;
+ }
+ fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+
+ if (inode) {
+ drop_inmem_pages(inode);
+ iput(inode);
+ }
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ cond_resched();
+ goto next;
+}
+
void drop_inmem_pages(struct inode *inode)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
mutex_lock(&fi->inmem_lock);
__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (!list_empty(&fi->inmem_ilist))
+ list_del_init(&fi->inmem_ilist);
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
mutex_unlock(&fi->inmem_lock);
clear_inode_flag(inode, FI_ATOMIC_FILE);
@@ -313,6 +347,7 @@ static int __commit_inmem_pages(struct inode *inode,
struct inmem_pages *cur, *tmp;
struct f2fs_io_info fio = {
.sbi = sbi,
+ .ino = inode->i_ino,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_PRIO,
@@ -398,6 +433,10 @@ int commit_inmem_pages(struct inode *inode)
/* drop all uncommitted pages */
__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
}
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (!list_empty(&fi->inmem_ilist))
+ list_del_init(&fi->inmem_ilist);
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
mutex_unlock(&fi->inmem_lock);
clear_inode_flag(inode, FI_ATOMIC_COMMIT);
@@ -472,7 +511,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
struct block_device *bdev)
{
- struct bio *bio = f2fs_bio_alloc(0);
+ struct bio *bio = f2fs_bio_alloc(sbi, 0, true);
int ret;
bio->bi_rw = REQ_OP_WRITE;
@@ -485,15 +524,17 @@ static int __submit_flush_wait(struct f2fs_sb_info *sbi,
return ret;
}
-static int submit_flush_wait(struct f2fs_sb_info *sbi)
+static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
- int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
+ int ret = 0;
int i;
- if (!sbi->s_ndevs || ret)
- return ret;
+ if (!sbi->s_ndevs)
+ return __submit_flush_wait(sbi, sbi->sb->s_bdev);
- for (i = 1; i < sbi->s_ndevs; i++) {
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ if (!is_dirty_device(sbi, ino, i, FLUSH_INO))
+ continue;
ret = __submit_flush_wait(sbi, FDEV(i).bdev);
if (ret)
break;
@@ -519,7 +560,9 @@ repeat:
fcc->dispatch_list = llist_del_all(&fcc->issue_list);
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
- ret = submit_flush_wait(sbi);
+ cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
+
+ ret = submit_flush_wait(sbi, cmd->ino);
atomic_inc(&fcc->issued_flush);
llist_for_each_entry_safe(cmd, next,
@@ -537,7 +580,7 @@ repeat:
goto repeat;
}
-int f2fs_issue_flush(struct f2fs_sb_info *sbi)
+int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
struct flush_cmd cmd;
@@ -547,19 +590,20 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
return 0;
if (!test_opt(sbi, FLUSH_MERGE)) {
- ret = submit_flush_wait(sbi);
+ ret = submit_flush_wait(sbi, ino);
atomic_inc(&fcc->issued_flush);
return ret;
}
- if (atomic_inc_return(&fcc->issing_flush) == 1) {
- ret = submit_flush_wait(sbi);
+ if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
+ ret = submit_flush_wait(sbi, ino);
atomic_dec(&fcc->issing_flush);
atomic_inc(&fcc->issued_flush);
return ret;
}
+ cmd.ino = ino;
init_completion(&cmd.wait);
llist_add(&cmd.llnode, &fcc->issue_list);
@@ -583,7 +627,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
} else {
struct flush_cmd *tmp, *next;
- ret = submit_flush_wait(sbi);
+ ret = submit_flush_wait(sbi, ino);
llist_for_each_entry_safe(tmp, next, list, llnode) {
if (tmp == &cmd) {
@@ -653,6 +697,28 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
}
}
+int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
+{
+ int ret = 0, i;
+
+ if (!sbi->s_ndevs)
+ return 0;
+
+ for (i = 1; i < sbi->s_ndevs; i++) {
+ if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
+ continue;
+ ret = __submit_flush_wait(sbi, FDEV(i).bdev);
+ if (ret)
+ break;
+
+ spin_lock(&sbi->dev_lock);
+ f2fs_clear_bit(i, (char *)&sbi->dirty_device);
+ spin_unlock(&sbi->dev_lock);
+ }
+
+ return ret;
+}
+
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
enum dirty_type dirty_type)
{
@@ -794,6 +860,8 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
+
f2fs_bug_on(sbi, dc->ref);
if (dc->error == -EOPNOTSUPP)
@@ -875,7 +943,7 @@ static int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (ret)
return ret;
}
- bio = f2fs_bio_alloc(1);
+ bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, 1);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio_set_op_attrs(bio, op, 0);
@@ -926,10 +994,14 @@ void __check_sit_bitmap(struct f2fs_sb_info *sbi,
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
- struct discard_cmd *dc)
+ struct discard_policy *dpolicy,
+ struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
+ &(dcc->fstrim_list) : &(dcc->wait_list);
struct bio *bio = NULL;
+ int flag = dpolicy->sync ? REQ_SYNC : 0;
if (dc->state != D_PREP)
return;
@@ -948,8 +1020,8 @@ static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
if (bio) {
bio->bi_private = dc;
bio->bi_end_io = f2fs_submit_discard_endio;
- submit_bio(REQ_SYNC, bio);
- list_move_tail(&dc->list, &dcc->wait_list);
+ submit_bio(flag, bio);
+ list_move_tail(&dc->list, wait_list);
__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
f2fs_update_iostat(sbi, FS_DISCARD, 1);
@@ -966,7 +1038,7 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
struct rb_node *insert_parent)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- struct rb_node **p = &dcc->root.rb_node;
+ struct rb_node **p;
struct rb_node *parent = NULL;
struct discard_cmd *dc = NULL;
@@ -1134,58 +1206,107 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
return 0;
}
-static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
+static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy,
+ unsigned int start, unsigned int end)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct discard_cmd *dc;
+ struct blk_plug plug;
+ int issued;
+
+next:
+ issued = 0;
+
+ mutex_lock(&dcc->cmd_lock);
+ f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+
+ dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+ NULL, start,
+ (struct rb_entry **)&prev_dc,
+ (struct rb_entry **)&next_dc,
+ &insert_p, &insert_parent, true);
+ if (!dc)
+ dc = next_dc;
+
+ blk_start_plug(&plug);
+
+ while (dc && dc->lstart <= end) {
+ struct rb_node *node;
+
+ if (dc->len < dpolicy->granularity)
+ goto skip;
+
+ if (dc->state != D_PREP) {
+ list_move_tail(&dc->list, &dcc->fstrim_list);
+ goto skip;
+ }
+
+ __submit_discard_cmd(sbi, dpolicy, dc);
+
+ if (++issued >= dpolicy->max_requests) {
+ start = dc->lstart + dc->len;
+
+ blk_finish_plug(&plug);
+ mutex_unlock(&dcc->cmd_lock);
+
+ schedule();
+
+ goto next;
+ }
+skip:
+ node = rb_next(&dc->rb_node);
+ dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+
+ if (fatal_signal_pending(current))
+ break;
+ }
+
+ blk_finish_plug(&plug);
+ mutex_unlock(&dcc->cmd_lock);
+}
+
+static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int iter = 0, issued = 0;
- int i;
+ int i, iter = 0, issued = 0;
bool io_interrupted = false;
- mutex_lock(&dcc->cmd_lock);
- f2fs_bug_on(sbi,
- !__check_rb_tree_consistence(sbi, &dcc->root));
- blk_start_plug(&plug);
- for (i = MAX_PLIST_NUM - 1;
- i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
+ for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+ if (i + 1 < dpolicy->granularity)
+ break;
pend_list = &dcc->pend_list[i];
+
+ mutex_lock(&dcc->cmd_lock);
+ f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+ blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
- /* Hurry up to finish fstrim */
- if (dcc->pend_list_tag[i] & P_TRIM) {
- __submit_discard_cmd(sbi, dc);
- issued++;
-
- if (fatal_signal_pending(current))
- break;
- continue;
- }
-
- if (!issue_cond) {
- __submit_discard_cmd(sbi, dc);
- issued++;
- continue;
- }
-
- if (is_idle(sbi)) {
- __submit_discard_cmd(sbi, dc);
- issued++;
- } else {
+ if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
+ !is_idle(sbi)) {
io_interrupted = true;
+ goto skip;
}
- if (++iter >= DISCARD_ISSUE_RATE)
- goto out;
+ __submit_discard_cmd(sbi, dpolicy, dc);
+ issued++;
+skip:
+ if (++iter >= dpolicy->max_requests)
+ break;
}
- if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM)
- dcc->pend_list_tag[i] &= (~P_TRIM);
+ blk_finish_plug(&plug);
+ mutex_unlock(&dcc->cmd_lock);
+
+ if (iter >= dpolicy->max_requests)
+ break;
}
-out:
- blk_finish_plug(&plug);
- mutex_unlock(&dcc->cmd_lock);
if (!issued && io_interrupted)
issued = -1;
@@ -1193,12 +1314,13 @@ out:
return issued;
}
-static void __drop_discard_cmd(struct f2fs_sb_info *sbi)
+static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
int i;
+ bool dropped = false;
mutex_lock(&dcc->cmd_lock);
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
@@ -1206,39 +1328,58 @@ static void __drop_discard_cmd(struct f2fs_sb_info *sbi)
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
__remove_discard_cmd(sbi, dc);
+ dropped = true;
}
}
mutex_unlock(&dcc->cmd_lock);
+
+ return dropped;
}
-static void __wait_one_discard_bio(struct f2fs_sb_info *sbi,
+static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ unsigned int len = 0;
wait_for_completion_io(&dc->wait);
mutex_lock(&dcc->cmd_lock);
f2fs_bug_on(sbi, dc->state != D_DONE);
dc->ref--;
- if (!dc->ref)
+ if (!dc->ref) {
+ if (!dc->error)
+ len = dc->len;
__remove_discard_cmd(sbi, dc);
+ }
mutex_unlock(&dcc->cmd_lock);
+
+ return len;
}
-static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
+static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy,
+ block_t start, block_t end)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- struct list_head *wait_list = &(dcc->wait_list);
+ struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
+ &(dcc->fstrim_list) : &(dcc->wait_list);
struct discard_cmd *dc, *tmp;
bool need_wait;
+ unsigned int trimmed = 0;
next:
need_wait = false;
mutex_lock(&dcc->cmd_lock);
list_for_each_entry_safe(dc, tmp, wait_list, list) {
- if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
+ if (dc->lstart + dc->len <= start || end <= dc->lstart)
+ continue;
+ if (dc->len < dpolicy->granularity)
+ continue;
+ if (dc->state == D_DONE && !dc->ref) {
wait_for_completion_io(&dc->wait);
+ if (!dc->error)
+ trimmed += dc->len;
__remove_discard_cmd(sbi, dc);
} else {
dc->ref++;
@@ -1249,9 +1390,17 @@ next:
mutex_unlock(&dcc->cmd_lock);
if (need_wait) {
- __wait_one_discard_bio(sbi, dc);
+ trimmed += __wait_one_discard_bio(sbi, dc);
goto next;
}
+
+ return trimmed;
+}
+
+static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy)
+{
+ __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1289,23 +1438,19 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
}
}
-/* This comes from f2fs_put_super and f2fs_trim_fs */
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
-{
- __issue_discard_cmd(sbi, false);
- __drop_discard_cmd(sbi);
- __wait_discard_cmd(sbi, !umount);
-}
-
-static void mark_discard_range_all(struct f2fs_sb_info *sbi)
+/* This comes from f2fs_put_super */
+bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- int i;
+ struct discard_policy dpolicy;
+ bool dropped;
- mutex_lock(&dcc->cmd_lock);
- for (i = 0; i < MAX_PLIST_NUM; i++)
- dcc->pend_list_tag[i] |= P_TRIM;
- mutex_unlock(&dcc->cmd_lock);
+ init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
+ __issue_discard_cmd(sbi, &dpolicy);
+ dropped = __drop_discard_cmd(sbi);
+ __wait_all_discard_cmd(sbi, &dpolicy);
+
+ return dropped;
}
static int issue_discard_thread(void *data)
@@ -1313,12 +1458,16 @@ static int issue_discard_thread(void *data)
struct f2fs_sb_info *sbi = data;
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
wait_queue_head_t *q = &dcc->discard_wait_queue;
+ struct discard_policy dpolicy;
unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
int issued;
set_freezable();
do {
+ init_discard_policy(&dpolicy, DPOLICY_BG,
+ dcc->discard_granularity);
+
wait_event_interruptible_timeout(*q,
kthread_should_stop() || freezing(current) ||
dcc->discard_wake,
@@ -1331,17 +1480,18 @@ static int issue_discard_thread(void *data)
if (dcc->discard_wake) {
dcc->discard_wake = 0;
if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
- mark_discard_range_all(sbi);
+ init_discard_policy(&dpolicy,
+ DPOLICY_FORCE, 1);
}
sb_start_intwrite(sbi->sb);
- issued = __issue_discard_cmd(sbi, true);
+ issued = __issue_discard_cmd(sbi, &dpolicy);
if (issued) {
- __wait_discard_cmd(sbi, true);
- wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
+ __wait_all_discard_cmd(sbi, &dpolicy);
+ wait_ms = dpolicy.min_interval;
} else {
- wait_ms = DEF_MAX_DISCARD_ISSUE_TIME;
+ wait_ms = dpolicy.max_interval;
}
sb_end_intwrite(sbi->sb);
@@ -1605,7 +1755,6 @@ find_next:
f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
len);
- cpc->trimmed += len;
total_len += len;
} else {
next_pos = find_next_bit_le(entry->discard_map,
@@ -1626,6 +1775,37 @@ skip:
wake_up_discard_thread(sbi, false);
}
+void init_discard_policy(struct discard_policy *dpolicy,
+ int discard_type, unsigned int granularity)
+{
+ /* common policy */
+ dpolicy->type = discard_type;
+ dpolicy->sync = true;
+ dpolicy->granularity = granularity;
+
+ if (discard_type == DPOLICY_BG) {
+ dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+ dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+ dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+ dpolicy->io_aware_gran = MAX_PLIST_NUM;
+ dpolicy->io_aware = true;
+ } else if (discard_type == DPOLICY_FORCE) {
+ dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+ dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
+ dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+ dpolicy->io_aware_gran = MAX_PLIST_NUM;
+ dpolicy->io_aware = true;
+ } else if (discard_type == DPOLICY_FSTRIM) {
+ dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+ dpolicy->io_aware_gran = MAX_PLIST_NUM;
+ dpolicy->io_aware = false;
+ } else if (discard_type == DPOLICY_UMOUNT) {
+ dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+ dpolicy->io_aware_gran = MAX_PLIST_NUM;
+ dpolicy->io_aware = false;
+ }
+}
+
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
@@ -1643,12 +1823,10 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
INIT_LIST_HEAD(&dcc->entry_list);
- for (i = 0; i < MAX_PLIST_NUM; i++) {
+ for (i = 0; i < MAX_PLIST_NUM; i++)
INIT_LIST_HEAD(&dcc->pend_list[i]);
- if (i >= dcc->discard_granularity - 1)
- dcc->pend_list_tag[i] |= P_ACTIVE;
- }
INIT_LIST_HEAD(&dcc->wait_list);
+ INIT_LIST_HEAD(&dcc->fstrim_list);
mutex_init(&dcc->cmd_lock);
atomic_set(&dcc->issued_discard, 0);
atomic_set(&dcc->issing_discard, 0);
@@ -1796,16 +1974,6 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
get_sec_entry(sbi, segno)->valid_blocks += del;
}
-void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
-{
- update_sit_entry(sbi, new, 1);
- if (GET_SEGNO(sbi, old) != NULL_SEGNO)
- update_sit_entry(sbi, old, -1);
-
- locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
- locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
-}
-
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
unsigned int segno = GET_SEGNO(sbi, addr);
@@ -1816,14 +1984,14 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
return;
/* add it into sit main buffer */
- mutex_lock(&sit_i->sentry_lock);
+ down_write(&sit_i->sentry_lock);
update_sit_entry(sbi, addr, -1);
/* add it into dirty seglist */
locate_dirty_segment(sbi, segno);
- mutex_unlock(&sit_i->sentry_lock);
+ up_write(&sit_i->sentry_lock);
}
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
@@ -1836,7 +2004,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return true;
- mutex_lock(&sit_i->sentry_lock);
+ down_read(&sit_i->sentry_lock);
segno = GET_SEGNO(sbi, blkaddr);
se = get_seg_entry(sbi, segno);
@@ -1845,7 +2013,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
if (f2fs_test_bit(offset, se->ckpt_valid_map))
is_cp = true;
- mutex_unlock(&sit_i->sentry_lock);
+ up_read(&sit_i->sentry_lock);
return is_cp;
}
@@ -1903,12 +2071,8 @@ struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
struct page *page = grab_meta_page(sbi, blk_addr);
- void *dst = page_address(page);
- if (src)
- memcpy(dst, src, PAGE_SIZE);
- else
- memset(dst, 0, PAGE_SIZE);
+ memcpy(page_address(page), src, PAGE_SIZE);
set_page_dirty(page);
f2fs_put_page(page, 1);
}
@@ -2007,7 +2171,6 @@ find_other_zone:
}
secno = left_start;
skip_left:
- hint = secno;
segno = GET_SEG_FROM_SEC(sbi, secno);
zoneno = GET_ZONE_FROM_SEC(sbi, secno);
@@ -2242,12 +2405,16 @@ void allocate_new_segments(struct f2fs_sb_info *sbi)
unsigned int old_segno;
int i;
+ down_write(&SIT_I(sbi)->sentry_lock);
+
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
curseg = CURSEG_I(sbi, i);
old_segno = curseg->segno;
SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
locate_dirty_segment(sbi, old_segno);
}
+
+ up_write(&SIT_I(sbi)->sentry_lock);
}
static const struct segment_allocation default_salloc_ops = {
@@ -2259,14 +2426,14 @@ bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u64 trim_start = cpc->trim_start;
bool has_candidate = false;
- mutex_lock(&SIT_I(sbi)->sentry_lock);
+ down_write(&SIT_I(sbi)->sentry_lock);
for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
if (add_discard_addrs(sbi, cpc, true)) {
has_candidate = true;
break;
}
}
- mutex_unlock(&SIT_I(sbi)->sentry_lock);
+ up_write(&SIT_I(sbi)->sentry_lock);
cpc->trim_start = trim_start;
return has_candidate;
@@ -2276,14 +2443,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
__u64 start = F2FS_BYTES_TO_BLK(range->start);
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
- unsigned int start_segno, end_segno;
+ unsigned int start_segno, end_segno, cur_segno;
+ block_t start_block, end_block;
struct cp_control cpc;
+ struct discard_policy dpolicy;
+ unsigned long long trimmed = 0;
int err = 0;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
- cpc.trimmed = 0;
if (end <= MAIN_BLKADDR(sbi))
goto out;
@@ -2297,12 +2466,14 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
+
cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
/* do checkpoint to issue discard commands safely */
- for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
- cpc.trim_start = start_segno;
+ for (cur_segno = start_segno; cur_segno <= end_segno;
+ cur_segno = cpc.trim_end + 1) {
+ cpc.trim_start = cur_segno;
if (sbi->discard_blks == 0)
break;
@@ -2310,7 +2481,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
cpc.trim_end = end_segno;
else
cpc.trim_end = min_t(unsigned int,
- rounddown(start_segno +
+ rounddown(cur_segno +
BATCHED_TRIM_SEGMENTS(sbi),
sbi->segs_per_sec) - 1, end_segno);
@@ -2322,11 +2493,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
schedule();
}
- /* It's time to issue all the filed discards */
- mark_discard_range_all(sbi);
- f2fs_wait_discard_bios(sbi, false);
+
+ start_block = START_BLOCK(sbi, start_segno);
+ end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
+
+ init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+ __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
+ trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+ start_block, end_block);
out:
- range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
+ range->len = F2FS_BLK_TO_BYTES(trimmed);
return err;
}
@@ -2338,6 +2514,20 @@ static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
return false;
}
+#if 0
+int rw_hint_to_seg_type(enum rw_hint hint)
+{
+ switch (hint) {
+ case WRITE_LIFE_SHORT:
+ return CURSEG_HOT_DATA;
+ case WRITE_LIFE_EXTREME:
+ return CURSEG_COLD_DATA;
+ default:
+ return CURSEG_WARM_DATA;
+ }
+}
+#endif
+
static int __get_segment_type_2(struct f2fs_io_info *fio)
{
if (fio->type == DATA)
@@ -2372,6 +2562,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
return CURSEG_COLD_DATA;
if (is_inode_flag_set(inode, FI_HOT_DATA))
return CURSEG_HOT_DATA;
+ /* rw_hint_to_seg_type(inode->i_write_hint); */
return CURSEG_WARM_DATA;
} else {
if (IS_DNODE(fio->page))
@@ -2416,8 +2607,10 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
+ down_read(&SM_I(sbi)->curseg_lock);
+
mutex_lock(&curseg->curseg_mutex);
- mutex_lock(&sit_i->sentry_lock);
+ down_write(&sit_i->sentry_lock);
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
@@ -2434,15 +2627,26 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
stat_inc_block_count(sbi, curseg);
+ /*
+ * SIT information should be updated before segment allocation,
+ * since SSR needs latest valid block information.
+ */
+ update_sit_entry(sbi, *new_blkaddr, 1);
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ update_sit_entry(sbi, old_blkaddr, -1);
+
if (!__has_curseg_space(sbi, type))
sit_i->s_ops->allocate_segment(sbi, type, false);
+
/*
- * SIT information should be updated after segment allocation,
- * since we need to keep dirty segments precisely under SSR.
+ * segment dirty status should be updated after segment allocation,
+ * so we just need to update status only one time after previous
+ * segment being closed.
*/
- refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
- mutex_unlock(&sit_i->sentry_lock);
+ up_write(&sit_i->sentry_lock);
if (page && IS_NODESEG(type)) {
fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
@@ -2462,6 +2666,29 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
}
mutex_unlock(&curseg->curseg_mutex);
+
+ up_read(&SM_I(sbi)->curseg_lock);
+}
+
+static void update_device_state(struct f2fs_io_info *fio)
+{
+ struct f2fs_sb_info *sbi = fio->sbi;
+ unsigned int devidx;
+
+ if (!sbi->s_ndevs)
+ return;
+
+ devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
+
+ /* update device state for fsync */
+ set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
+
+ /* update device state for checkpoint */
+ if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
+ spin_lock(&sbi->dev_lock);
+ f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
+ spin_unlock(&sbi->dev_lock);
+ }
}
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
@@ -2478,6 +2705,8 @@ reallocate:
if (err == -EAGAIN) {
fio->old_blkaddr = fio->new_blkaddr;
goto reallocate;
+ } else if (!err) {
+ update_device_state(fio);
}
}
@@ -2538,12 +2767,26 @@ int rewrite_data_page(struct f2fs_io_info *fio)
stat_inc_inplace_blocks(fio->sbi);
err = f2fs_submit_page_bio(fio);
+ if (!err)
+ update_device_state(fio);
f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
return err;
}
+static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ int i;
+
+ for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
+ if (CURSEG_I(sbi, i)->segno == segno)
+ break;
+ }
+ return i;
+}
+
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
block_t old_blkaddr, block_t new_blkaddr,
bool recover_curseg, bool recover_newaddr)
@@ -2559,6 +2802,8 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
se = get_seg_entry(sbi, segno);
type = se->type;
+ down_write(&SM_I(sbi)->curseg_lock);
+
if (!recover_curseg) {
/* for recovery flow */
if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
@@ -2568,14 +2813,19 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
type = CURSEG_WARM_DATA;
}
} else {
- if (!IS_CURSEG(sbi, segno))
+ if (IS_CURSEG(sbi, segno)) {
+ /* se->type is volatile as SSR allocation */
+ type = __f2fs_get_curseg(sbi, segno);
+ f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
+ } else {
type = CURSEG_WARM_DATA;
+ }
}
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
- mutex_lock(&sit_i->sentry_lock);
+ down_write(&sit_i->sentry_lock);
old_cursegno = curseg->segno;
old_blkoff = curseg->next_blkoff;
@@ -2607,8 +2857,9 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
curseg->next_blkoff = old_blkoff;
}
- mutex_unlock(&sit_i->sentry_lock);
+ up_write(&sit_i->sentry_lock);
mutex_unlock(&curseg->curseg_mutex);
+ up_write(&SM_I(sbi)->curseg_lock);
}
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
@@ -3062,7 +3313,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
bool to_journal = true;
struct seg_entry *se;
- mutex_lock(&sit_i->sentry_lock);
+ down_write(&sit_i->sentry_lock);
if (!sit_i->dirty_sentries)
goto out;
@@ -3156,7 +3407,7 @@ out:
cpc->trim_start = trim_start;
}
- mutex_unlock(&sit_i->sentry_lock);
+ up_write(&sit_i->sentry_lock);
set_prefree_as_free_segments(sbi);
}
@@ -3249,7 +3500,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
- mutex_init(&sit_i->sentry_lock);
+ init_rwsem(&sit_i->sentry_lock);
return 0;
}
@@ -3490,7 +3741,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
struct sit_info *sit_i = SIT_I(sbi);
unsigned int segno;
- mutex_lock(&sit_i->sentry_lock);
+ down_write(&sit_i->sentry_lock);
sit_i->min_mtime = LLONG_MAX;
@@ -3507,7 +3758,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
sit_i->min_mtime = mtime;
}
sit_i->max_mtime = get_mtime(sbi);
- mutex_unlock(&sit_i->sentry_lock);
+ up_write(&sit_i->sentry_lock);
}
int build_segment_manager(struct f2fs_sb_info *sbi)
@@ -3540,11 +3791,14 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
+ sm_info->min_ssr_sections = reserved_sections(sbi);
sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
INIT_LIST_HEAD(&sm_info->sit_entry_set);
+ init_rwsem(&sm_info->curseg_lock);
+
if (!f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index ffa11274b0ce..5264b6ed120c 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -231,7 +231,7 @@ struct sit_info {
unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
unsigned int dirty_sentries; /* # of dirty sentries */
unsigned int sents_per_block; /* # of SIT entries per block */
- struct mutex sentry_lock; /* to protect SIT cache */
+ struct rw_semaphore sentry_lock; /* to protect SIT cache */
struct seg_entry *sentries; /* SIT segment-level cache */
struct sec_entry *sec_entries; /* SIT section-level cache */
@@ -497,6 +497,33 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}
+static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
+{
+ unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
+ get_pages(sbi, F2FS_DIRTY_DENTS);
+ unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+ unsigned int segno, left_blocks;
+ int i;
+
+ /* check current node segment */
+ for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
+ segno = CURSEG_I(sbi, i)->segno;
+ left_blocks = sbi->blocks_per_seg -
+ get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+
+ if (node_blocks > left_blocks)
+ return false;
+ }
+
+ /* check current data segment */
+ segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
+ left_blocks = sbi->blocks_per_seg -
+ get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ if (dent_blocks > left_blocks)
+ return false;
+ return true;
+}
+
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
int freed, int needed)
{
@@ -507,6 +534,9 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
+ if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
+ has_curseg_enough_space(sbi))
+ return false;
return (free_sections(sbi) + freed) <=
(node_secs + 2 * dent_secs + imeta_secs +
reserved_sections(sbi) + needed);
@@ -730,7 +760,7 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
unsigned int secno)
{
- if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >=
+ if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >
sbi->fggc_threshold)
return true;
return false;
@@ -795,8 +825,9 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
goto wake_up;
mutex_lock(&dcc->cmd_lock);
- for (i = MAX_PLIST_NUM - 1;
- i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
+ for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+ if (i + 1 < dcc->discard_granularity)
+ break;
if (!list_empty(&dcc->pend_list[i])) {
wakeup = true;
break;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 5c60fc28ec75..0b5664a1a6cc 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -28,7 +28,7 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
- long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
+ long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;
return count > 0 ? count : 0;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 482bb0333806..76e2f1518224 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -44,6 +44,8 @@ static struct kmem_cache *f2fs_inode_cachep;
char *fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
+ [FAULT_PAGE_GET] = "page get",
+ [FAULT_ALLOC_BIO] = "alloc bio",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
@@ -92,6 +94,7 @@ enum {
Opt_disable_ext_identify,
Opt_inline_xattr,
Opt_noinline_xattr,
+ Opt_inline_xattr_size,
Opt_inline_data,
Opt_inline_dentry,
Opt_noinline_dentry,
@@ -141,6 +144,7 @@ static match_table_t f2fs_tokens = {
{Opt_disable_ext_identify, "disable_ext_identify"},
{Opt_inline_xattr, "inline_xattr"},
{Opt_noinline_xattr, "noinline_xattr"},
+ {Opt_inline_xattr_size, "inline_xattr_size=%u"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
{Opt_noinline_dentry, "noinline_dentry"},
@@ -209,6 +213,12 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
"quota options when quota turned on");
return -EINVAL;
}
+ if (f2fs_sb_has_quota_ino(sb)) {
+ f2fs_msg(sb, KERN_INFO,
+ "QUOTA feature is enabled, so ignore qf_name");
+ return 0;
+ }
+
qname = match_strdup(args);
if (!qname) {
f2fs_msg(sb, KERN_ERR,
@@ -287,6 +297,18 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
return -1;
}
}
+
+ if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "QUOTA feature is enabled, so ignore jquota_fmt");
+ sbi->s_jquota_fmt = 0;
+ }
+ if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Filesystem with quota feature cannot be mounted RDWR "
+ "without CONFIG_QUOTA");
+ return -1;
+ }
return 0;
}
#endif
@@ -383,6 +405,12 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_noinline_xattr:
clear_opt(sbi, INLINE_XATTR);
break;
+ case Opt_inline_xattr_size:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ set_opt(sbi, INLINE_XATTR_SIZE);
+ sbi->inline_xattr_size = arg;
+ break;
#else
case Opt_user_xattr:
f2fs_msg(sb, KERN_INFO,
@@ -604,6 +632,24 @@ static int parse_options(struct super_block *sb, char *options)
F2FS_IO_SIZE_KB(sbi));
return -EINVAL;
}
+
+ if (test_opt(sbi, INLINE_XATTR_SIZE)) {
+ if (!test_opt(sbi, INLINE_XATTR)) {
+ f2fs_msg(sb, KERN_ERR,
+ "inline_xattr_size option should be "
+ "set with inline_xattr option");
+ return -EINVAL;
+ }
+ if (!sbi->inline_xattr_size ||
+ sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE -
+ F2FS_TOTAL_EXTRA_ATTR_SIZE -
+ DEF_INLINE_RESERVED_SIZE -
+ DEF_MIN_INLINE_SIZE) {
+ f2fs_msg(sb, KERN_ERR,
+ "inline xattr size is out of range");
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -618,13 +664,13 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
init_once((void *) fi);
/* Initialize f2fs-specific inode info */
- fi->vfs_inode.i_version = 1;
atomic_set(&fi->dirty_pages, 0);
fi->i_current_depth = 1;
fi->i_advise = 0;
init_rwsem(&fi->i_sem);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
+ INIT_LIST_HEAD(&fi->inmem_ilist);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
init_rwsem(&fi->dio_rwsem[READ]);
@@ -673,7 +719,6 @@ static int f2fs_drop_inode(struct inode *inode)
sb_end_intwrite(inode->i_sb);
- fscrypt_put_encryption_info(inode, NULL);
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
@@ -781,6 +826,7 @@ static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i;
+ bool dropped;
f2fs_quota_off_umount(sb);
@@ -801,9 +847,9 @@ static void f2fs_put_super(struct super_block *sb)
}
/* be sure to wait for any on-going discard commands */
- f2fs_wait_discard_bios(sbi, true);
+ dropped = f2fs_wait_discard_bios(sbi);
- if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
+ if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
@@ -859,6 +905,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err = 0;
+ if (unlikely(f2fs_cp_error(sbi)))
+ return 0;
+
trace_f2fs_sync_fs(sb, sync);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -958,7 +1007,7 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = total_count - start_count;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
- sbi->reserved_blocks;
+ sbi->current_reserved_blocks;
avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
@@ -1047,6 +1096,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",inline_xattr");
else
seq_puts(seq, ",noinline_xattr");
+ if (test_opt(sbi, INLINE_XATTR_SIZE))
+ seq_printf(seq, ",inline_xattr_size=%u",
+ sbi->inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
@@ -1109,6 +1161,7 @@ static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
+ sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
@@ -1137,6 +1190,9 @@ static void default_options(struct f2fs_sb_info *sbi)
#endif
}
+#ifdef CONFIG_QUOTA
+static int f2fs_enable_quotas(struct super_block *sb);
+#endif
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -1203,6 +1259,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
goto skip;
+#ifdef CONFIG_QUOTA
if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
err = dquot_suspend(sb, -1);
if (err < 0)
@@ -1210,9 +1267,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
} else {
/* dquot_resume needs RW */
sb->s_flags &= ~MS_RDONLY;
- dquot_resume(sb, -1);
+ if (sb_any_quota_suspended(sb)) {
+ dquot_resume(sb, -1);
+ } else if (f2fs_sb_has_quota_ino(sb)) {
+ err = f2fs_enable_quotas(sb);
+ if (err)
+ goto restore_opts;
+ }
}
-
+#endif
/* disallow enable/disable extent_cache dynamically */
if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
err = -EINVAL;
@@ -1321,8 +1384,13 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
page = read_mapping_page(mapping, blkidx, NULL);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ if (PTR_ERR(page) == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto repeat;
+ }
return PTR_ERR(page);
+ }
lock_page(page);
@@ -1365,11 +1433,16 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
while (towrite > 0) {
tocopy = min_t(unsigned long, sb->s_blocksize - offset,
towrite);
-
+retry:
err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
&page, NULL);
- if (unlikely(err))
+ if (unlikely(err)) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
break;
+ }
kaddr = kmap_atomic(page);
memcpy(kaddr + offset, data, tocopy);
@@ -1386,8 +1459,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
}
if (len == towrite)
- return 0;
- inode->i_version++;
+ return err;
inode->i_mtime = inode->i_ctime = current_time(inode);
f2fs_mark_inode_dirty_sync(inode, false);
return len - towrite;
@@ -1409,19 +1481,91 @@ static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
sbi->s_jquota_fmt, type);
}
-void f2fs_enable_quota_files(struct f2fs_sb_info *sbi)
+int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
- int i, ret;
+ int enabled = 0;
+ int i, err;
+
+ if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
+ err = f2fs_enable_quotas(sbi->sb);
+ if (err) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Cannot turn on quota_ino: %d", err);
+ return 0;
+ }
+ return 1;
+ }
for (i = 0; i < MAXQUOTAS; i++) {
if (sbi->s_qf_names[i]) {
- ret = f2fs_quota_on_mount(sbi, i);
- if (ret < 0)
- f2fs_msg(sbi->sb, KERN_ERR,
- "Cannot turn on journaled "
- "quota: error %d", ret);
+ err = f2fs_quota_on_mount(sbi, i);
+ if (!err) {
+ enabled = 1;
+ continue;
+ }
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Cannot turn on quotas: %d on %d", err, i);
+ }
+ }
+ return enabled;
+}
+
+static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
+ unsigned int flags)
+{
+ struct inode *qf_inode;
+ unsigned long qf_inum;
+ int err;
+
+ BUG_ON(!f2fs_sb_has_quota_ino(sb));
+
+ qf_inum = f2fs_qf_ino(sb, type);
+ if (!qf_inum)
+ return -EPERM;
+
+ qf_inode = f2fs_iget(sb, qf_inum);
+ if (IS_ERR(qf_inode)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Bad quota inode %u:%lu", type, qf_inum);
+ return PTR_ERR(qf_inode);
+ }
+
+ /* Don't account quota for quota files to avoid recursion */
+ qf_inode->i_flags |= S_NOQUOTA;
+ err = dquot_enable(qf_inode, type, format_id, flags);
+ iput(qf_inode);
+ return err;
+}
+
+static int f2fs_enable_quotas(struct super_block *sb)
+{
+ int type, err = 0;
+ unsigned long qf_inum;
+ bool quota_mopt[MAXQUOTAS] = {
+ test_opt(F2FS_SB(sb), USRQUOTA),
+ test_opt(F2FS_SB(sb), GRPQUOTA),
+ test_opt(F2FS_SB(sb), PRJQUOTA),
+ };
+
+ sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ qf_inum = f2fs_qf_ino(sb, type);
+ if (qf_inum) {
+ err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
+ DQUOT_USAGE_ENABLED |
+ (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
+ if (err) {
+ f2fs_msg(sb, KERN_ERR,
+ "Failed to enable quota tracking "
+ "(type=%d, err=%d). Please run "
+ "fsck to fix.", type, err);
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+ return err;
+ }
}
}
+ return 0;
}
static int f2fs_quota_sync(struct super_block *sb, int type)
@@ -1492,7 +1636,7 @@ static int f2fs_quota_off(struct super_block *sb, int type)
f2fs_quota_sync(sb, type);
err = dquot_quota_off(sb, type);
- if (err)
+ if (err || f2fs_sb_has_quota_ino(sb))
goto out_put;
inode_lock(inode);
@@ -1660,7 +1804,7 @@ static loff_t max_file_blocks(void)
/*
* note: previously, result is equal to (DEF_ADDRS_PER_INODE -
- * F2FS_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
+ * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more
* space in inode.i_addr, it will be more safe to reassign
* result as zero.
*/
@@ -1969,6 +2113,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
for (j = HOT; j < NR_TEMP_TYPE; j++)
mutex_init(&sbi->wio_mutex[i][j]);
spin_lock_init(&sbi->cp_lock);
+
+ sbi->dirty_device = 0;
+ spin_lock_init(&sbi->dev_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -2323,7 +2470,10 @@ try_onemore:
#ifdef CONFIG_QUOTA
sb->dq_op = &f2fs_quota_operations;
- sb->s_qcop = &f2fs_quotactl_ops;
+ if (f2fs_sb_has_quota_ino(sb))
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
+ else
+ sb->s_qcop = &f2fs_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
@@ -2419,6 +2569,7 @@ try_onemore:
le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count;
sbi->reserved_blocks = 0;
+ sbi->current_reserved_blocks = 0;
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
@@ -2493,10 +2644,24 @@ try_onemore:
if (err)
goto free_root_inode;
+#ifdef CONFIG_QUOTA
+ /*
+ * Turn on quotas which were not enabled for read-only mounts if
+ * filesystem has quota feature, so that they are updated correctly.
+ */
+ if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) {
+ err = f2fs_enable_quotas(sb);
+ if (err) {
+ f2fs_msg(sb, KERN_ERR,
+ "Cannot turn on quotas: error %d", err);
+ goto free_sysfs;
+ }
+ }
+#endif
/* if there are nt orphan nodes free them */
err = recover_orphan_inodes(sbi);
if (err)
- goto free_sysfs;
+ goto free_meta;
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
@@ -2530,7 +2695,7 @@ try_onemore:
err = -EINVAL;
f2fs_msg(sb, KERN_ERR,
"Need to recover fsync data");
- goto free_sysfs;
+ goto free_meta;
}
}
skip_recovery:
@@ -2564,6 +2729,10 @@ skip_recovery:
return 0;
free_meta:
+#ifdef CONFIG_QUOTA
+ if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb))
+ f2fs_quota_off_umount(sbi->sb);
+#endif
f2fs_sync_inode_meta(sbi);
/*
* Some dirty meta pages can be produced by recover_orphan_inodes()
@@ -2572,7 +2741,9 @@ free_meta:
* falls into an infinite loop in sync_meta_pages().
*/
truncate_inode_pages_final(META_MAPPING(sbi));
+#ifdef CONFIG_QUOTA
free_sysfs:
+#endif
f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index e2c258f717cd..9835348b6e5d 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -30,7 +30,7 @@ enum {
FAULT_INFO_RATE, /* struct f2fs_fault_info */
FAULT_INFO_TYPE, /* struct f2fs_fault_info */
#endif
- RESERVED_BLOCKS,
+ RESERVED_BLOCKS, /* struct f2fs_sb_info */
};
struct f2fs_attr {
@@ -63,6 +63,13 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
return NULL;
}
+static ssize_t dirty_segments_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)(dirty_segments(sbi)));
+}
+
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -100,10 +107,22 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_inode_chksum(sb))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "inode_checksum");
+ if (f2fs_sb_has_flexible_inline_xattr(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "flexible_inline_xattr");
+ if (f2fs_sb_has_quota_ino(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "quota_ino");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
+static ssize_t current_reserved_blocks_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", sbi->current_reserved_blocks);
+}
+
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -143,34 +162,22 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
#endif
if (a->struct_type == RESERVED_BLOCKS) {
spin_lock(&sbi->stat_lock);
- if ((unsigned long)sbi->total_valid_block_count + t >
- (unsigned long)sbi->user_block_count) {
+ if (t > (unsigned long)sbi->user_block_count) {
spin_unlock(&sbi->stat_lock);
return -EINVAL;
}
*ui = t;
+ sbi->current_reserved_blocks = min(sbi->reserved_blocks,
+ sbi->user_block_count - valid_user_blocks(sbi));
spin_unlock(&sbi->stat_lock);
return count;
}
if (!strcmp(a->attr.name, "discard_granularity")) {
- struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- int i;
-
if (t == 0 || t > MAX_PLIST_NUM)
return -EINVAL;
if (t == *ui)
return count;
-
- mutex_lock(&dcc->cmd_lock);
- for (i = 0; i < MAX_PLIST_NUM; i++) {
- if (i >= t - 1)
- dcc->pend_list_tag[i] |= P_ACTIVE;
- else
- dcc->pend_list_tag[i] &= (~P_ACTIVE);
- }
- mutex_unlock(&dcc->cmd_lock);
-
*ui = t;
return count;
}
@@ -222,6 +229,8 @@ enum feat_id {
FEAT_EXTRA_ATTR,
FEAT_PROJECT_QUOTA,
FEAT_INODE_CHECKSUM,
+ FEAT_FLEXIBLE_INLINE_XATTR,
+ FEAT_QUOTA_INO,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -234,6 +243,8 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_EXTRA_ATTR:
case FEAT_PROJECT_QUOTA:
case FEAT_INODE_CHECKSUM:
+ case FEAT_FLEXIBLE_INLINE_XATTR:
+ case FEAT_QUOTA_INO:
return snprintf(buf, PAGE_SIZE, "supported\n");
}
return 0;
@@ -279,6 +290,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
@@ -291,8 +303,10 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
+F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
F2FS_GENERAL_RO_ATTR(features);
+F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
#ifdef CONFIG_F2FS_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
@@ -304,6 +318,8 @@ F2FS_FEATURE_RO_ATTR(atomic_write, FEAT_ATOMIC_WRITE);
F2FS_FEATURE_RO_ATTR(extra_attr, FEAT_EXTRA_ATTR);
F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA);
F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
+F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
+F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -321,6 +337,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(min_ipu_util),
ATTR_LIST(min_fsync_blocks),
ATTR_LIST(min_hot_blocks),
+ ATTR_LIST(min_ssr_sections),
ATTR_LIST(max_victim_search),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
@@ -333,9 +350,11 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(inject_rate),
ATTR_LIST(inject_type),
#endif
+ ATTR_LIST(dirty_segments),
ATTR_LIST(lifetime_write_kbytes),
ATTR_LIST(features),
ATTR_LIST(reserved_blocks),
+ ATTR_LIST(current_reserved_blocks),
NULL,
};
@@ -350,6 +369,8 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(extra_attr),
ATTR_LIST(project_quota),
ATTR_LIST(inode_checksum),
+ ATTR_LIST(flexible_inline_xattr),
+ ATTR_LIST(quota_ino),
NULL,
};
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index ab658419552b..7acf56ebda65 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -264,12 +264,12 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
return entry;
}
-static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr,
- void **last_addr, int index,
- size_t len, const char *name)
+static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
+ void *base_addr, void **last_addr, int index,
+ size_t len, const char *name)
{
struct f2fs_xattr_entry *entry;
- unsigned int inline_size = F2FS_INLINE_XATTR_ADDRS << 2;
+ unsigned int inline_size = inline_xattr_size(inode);
list_for_each_xattr(entry, base_addr) {
if ((void *)entry + sizeof(__u32) > base_addr + inline_size ||
@@ -288,12 +288,54 @@ static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr,
return entry;
}
+static int read_inline_xattr(struct inode *inode, struct page *ipage,
+ void *txattr_addr)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int inline_size = inline_xattr_size(inode);
+ struct page *page = NULL;
+ void *inline_addr;
+
+ if (ipage) {
+ inline_addr = inline_xattr_addr(inode, ipage);
+ } else {
+ page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ inline_addr = inline_xattr_addr(inode, page);
+ }
+ memcpy(txattr_addr, inline_addr, inline_size);
+ f2fs_put_page(page, 1);
+
+ return 0;
+}
+
+static int read_xattr_block(struct inode *inode, void *txattr_addr)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ unsigned int inline_size = inline_xattr_size(inode);
+ struct page *xpage;
+ void *xattr_addr;
+
+ /* The inode already has an extended attribute block. */
+ xpage = get_node_page(sbi, xnid);
+ if (IS_ERR(xpage))
+ return PTR_ERR(xpage);
+
+ xattr_addr = page_address(xpage);
+ memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE);
+ f2fs_put_page(xpage, 1);
+
+ return 0;
+}
+
static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
unsigned int index, unsigned int len,
const char *name, struct f2fs_xattr_entry **xe,
void **base_addr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
void *cur_addr, *txattr_addr, *last_addr = NULL;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
@@ -310,23 +352,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
/* read from inline xattr */
if (inline_size) {
- struct page *page = NULL;
- void *inline_addr;
-
- if (ipage) {
- inline_addr = inline_xattr_addr(ipage);
- } else {
- page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out;
- }
- inline_addr = inline_xattr_addr(page);
- }
- memcpy(txattr_addr, inline_addr, inline_size);
- f2fs_put_page(page, 1);
+ err = read_inline_xattr(inode, ipage, txattr_addr);
+ if (err)
+ goto out;
- *xe = __find_inline_xattr(txattr_addr, &last_addr,
+ *xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
index, len, name);
if (*xe)
goto check;
@@ -334,19 +364,9 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
/* read from xattr node block */
if (xnid) {
- struct page *xpage;
- void *xattr_addr;
-
- /* The inode already has an extended attribute block. */
- xpage = get_node_page(sbi, xnid);
- if (IS_ERR(xpage)) {
- err = PTR_ERR(xpage);
+ err = read_xattr_block(inode, txattr_addr);
+ if (err)
goto out;
- }
-
- xattr_addr = page_address(xpage);
- memcpy(txattr_addr + inline_size, xattr_addr, size);
- f2fs_put_page(xpage, 1);
}
if (last_addr)
@@ -371,7 +391,6 @@ out:
static int read_all_xattrs(struct inode *inode, struct page *ipage,
void **base_addr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_header *header;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int size = VALID_XATTR_BLOCK_SIZE;
@@ -386,38 +405,16 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
/* read from inline xattr */
if (inline_size) {
- struct page *page = NULL;
- void *inline_addr;
-
- if (ipage) {
- inline_addr = inline_xattr_addr(ipage);
- } else {
- page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto fail;
- }
- inline_addr = inline_xattr_addr(page);
- }
- memcpy(txattr_addr, inline_addr, inline_size);
- f2fs_put_page(page, 1);
+ err = read_inline_xattr(inode, ipage, txattr_addr);
+ if (err)
+ goto fail;
}
/* read from xattr node block */
if (xnid) {
- struct page *xpage;
- void *xattr_addr;
-
- /* The inode already has an extended attribute block. */
- xpage = get_node_page(sbi, xnid);
- if (IS_ERR(xpage)) {
- err = PTR_ERR(xpage);
+ err = read_xattr_block(inode, txattr_addr);
+ if (err)
goto fail;
- }
-
- xattr_addr = page_address(xpage);
- memcpy(txattr_addr + inline_size, xattr_addr, size);
- f2fs_put_page(xpage, 1);
}
header = XATTR_HDR(txattr_addr);
@@ -439,10 +436,12 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
size_t inline_size = inline_xattr_size(inode);
+ struct page *in_page = NULL;
void *xattr_addr;
+ void *inline_addr = NULL;
struct page *xpage;
nid_t new_nid = 0;
- int err;
+ int err = 0;
if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
if (!alloc_nid(sbi, &new_nid))
@@ -450,30 +449,30 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
/* write to inline xattr */
if (inline_size) {
- struct page *page = NULL;
- void *inline_addr;
-
if (ipage) {
- inline_addr = inline_xattr_addr(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE, true);
- set_page_dirty(ipage);
+ inline_addr = inline_xattr_addr(inode, ipage);
} else {
- page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page)) {
+ in_page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(in_page)) {
alloc_nid_failed(sbi, new_nid);
- return PTR_ERR(page);
+ return PTR_ERR(in_page);
}
- inline_addr = inline_xattr_addr(page);
- f2fs_wait_on_page_writeback(page, NODE, true);
+ inline_addr = inline_xattr_addr(inode, in_page);
}
- memcpy(inline_addr, txattr_addr, inline_size);
- f2fs_put_page(page, 1);
+ f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
+ NODE, true);
/* no need to use xattr node block */
if (hsize <= inline_size) {
- err = truncate_xattr_node(inode, ipage);
+ err = truncate_xattr_node(inode);
alloc_nid_failed(sbi, new_nid);
- return err;
+ if (err) {
+ f2fs_put_page(in_page, 1);
+ return err;
+ }
+ memcpy(inline_addr, txattr_addr, inline_size);
+ set_page_dirty(ipage ? ipage : in_page);
+ goto in_page_out;
}
}
@@ -482,7 +481,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
if (IS_ERR(xpage)) {
alloc_nid_failed(sbi, new_nid);
- return PTR_ERR(xpage);
+ goto in_page_out;
}
f2fs_bug_on(sbi, new_nid);
f2fs_wait_on_page_writeback(xpage, NODE, true);
@@ -492,17 +491,24 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
if (IS_ERR(xpage)) {
alloc_nid_failed(sbi, new_nid);
- return PTR_ERR(xpage);
+ goto in_page_out;
}
alloc_nid_done(sbi, new_nid);
}
-
xattr_addr = page_address(xpage);
+
+ if (inline_size)
+ memcpy(inline_addr, txattr_addr, inline_size);
memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);
+
+ if (inline_size)
+ set_page_dirty(ipage ? ipage : in_page);
set_page_dirty(xpage);
- f2fs_put_page(xpage, 1);
- return 0;
+ f2fs_put_page(xpage, 1);
+in_page_out:
+ f2fs_put_page(in_page, 1);
+ return err;
}
int f2fs_getxattr(struct inode *inode, int index, const char *name,
@@ -721,6 +727,10 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err;
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
/* this case is only from init_inode_metadata */
if (ipage)
return __f2fs_setxattr(inode, index, name, value,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b237862dc274..0dede8a66816 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1343,7 +1343,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
*/
over = !dir_emit(ctx, dirent->name, dirent->namelen,
dirent->ino, dirent->type);
- ctx->pos = dirent->off;
+ if (!over)
+ ctx->pos = dirent->off;
}
buf += reclen;
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 0ac4c1f73fbd..25177e6bd603 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -103,7 +103,7 @@ static inline unsigned int isonum_733(char *p)
/* Ignore bigendian datum due to broken mastering programs */
return get_unaligned_le32(p);
}
-extern int iso_date(char *, int);
+extern int iso_date(u8 *, int);
struct inode; /* To make gcc happy */
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index ed09e2b08637..f835976ce033 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -65,7 +65,7 @@ struct RR_PL_s {
};
struct stamp {
- char time[7];
+ __u8 time[7]; /* actually 6 unsigned, 1 signed */
} __attribute__ ((packed));
struct RR_TF_s {
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index 005a15cfd30a..37860fea364d 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -15,7 +15,7 @@
* to GMT. Thus we should always be correct.
*/
-int iso_date(char * p, int flag)
+int iso_date(u8 *p, int flag)
{
int year, month, day, hour, minute, second, tz;
int crtime;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 348e0a05bd18..44e09483d2cd 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1260,7 +1260,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
return 0;
}
- error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ error = nfs_lookup_verify_inode(inode, flags);
dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
__func__, inode->i_ino, error ? "invalid" : "valid");
return !error;
@@ -1420,6 +1420,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs4_lookup_revalidate,
+ .d_weak_revalidate = nfs_weak_revalidate,
.d_delete = nfs_dentry_delete,
.d_iput = nfs_dentry_iput,
.d_automount = nfs_d_automount,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8e425f2c5ddd..8ef6f70c9e25 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,7 +38,6 @@
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
@@ -242,15 +241,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
};
const u32 nfs4_fs_locations_bitmap[3] = {
- FATTR4_WORD0_TYPE
- | FATTR4_WORD0_CHANGE
+ FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID
| FATTR4_WORD0_FS_LOCATIONS,
- FATTR4_WORD1_MODE
- | FATTR4_WORD1_NUMLINKS
- | FATTR4_WORD1_OWNER
+ FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
@@ -5741,7 +5737,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
- get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
@@ -5854,7 +5849,6 @@ static void nfs4_lock_release(void *calldata)
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
- fput(data->fl.fl_file);
kfree(data);
dprintk("%s: done!\n", __func__);
}
@@ -6351,9 +6345,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
struct page *page)
{
struct nfs_server *server = NFS_SERVER(dir);
- u32 bitmask[3] = {
- [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
- };
+ u32 bitmask[3];
struct nfs4_fs_locations_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
@@ -6372,12 +6364,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
dprintk("%s: start\n", __func__);
+ bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
+ bitmask[1] = nfs4_fattr_bitmap[1];
+
/* Ask for the fileid of the absent filesystem if mounted_on_fileid
* is not supported */
if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
- bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+ bitmask[0] &= ~FATTR4_WORD0_FILEID;
else
- bitmask[0] |= FATTR4_WORD0_FILEID;
+ bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e8d1d6c5000c..9a0b219ff74d 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1680,7 +1680,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
break;
case -NFS4ERR_STALE_CLIENTID:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- nfs4_state_clear_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f1268280244e..3149f7e58d6f 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1322,7 +1322,7 @@ static int nfs_parse_mount_options(char *raw,
mnt->options |= NFS_OPTION_MIGRATION;
break;
case Opt_nomigration:
- mnt->options &= NFS_OPTION_MIGRATION;
+ mnt->options &= ~NFS_OPTION_MIGRATION;
break;
/*
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index ca9ebc3242d3..11c67e8b939d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3379,7 +3379,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
- if (local->st_stateowner == &oo->oo_owner) {
+ if (local->st_stateowner != &oo->oo_owner)
+ continue;
+ if (local->st_stid.sc_type == NFS4_OPEN_STID) {
ret = local;
atomic_inc(&ret->st_stid.sc_count);
break;
@@ -3388,6 +3390,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
return ret;
}
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+ __be32 ret = nfs_ok;
+
+ switch (s->sc_type) {
+ default:
+ break;
+ case NFS4_CLOSED_STID:
+ case NFS4_CLOSED_DELEG_STID:
+ ret = nfserr_bad_stateid;
+ break;
+ case NFS4_REVOKED_DELEG_STID:
+ ret = nfserr_deleg_revoked;
+ }
+ return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+ __be32 ret;
+
+ mutex_lock(&stp->st_mutex);
+ ret = nfsd4_verify_open_stid(&stp->st_stid);
+ if (ret != nfs_ok)
+ mutex_unlock(&stp->st_mutex);
+ return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+ struct nfs4_ol_stateid *stp;
+ for (;;) {
+ spin_lock(&fp->fi_lock);
+ stp = nfsd4_find_existing_open(fp, open);
+ spin_unlock(&fp->fi_lock);
+ if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+ break;
+ nfs4_put_stid(&stp->st_stid);
+ }
+ return stp;
+}
+
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
@@ -3420,23 +3468,27 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
}
static struct nfs4_ol_stateid *
-init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
- struct nfsd4_open *open)
+init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_openowner *oo = open->op_openowner;
struct nfs4_ol_stateid *retstp = NULL;
+ struct nfs4_ol_stateid *stp;
+ stp = open->op_stp;
/* We are moving these outside of the spinlocks to avoid the warnings */
mutex_init(&stp->st_mutex);
mutex_lock(&stp->st_mutex);
+retry:
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
retstp = nfsd4_find_existing_open(fp, open);
if (retstp)
goto out_unlock;
+
+ open->op_stp = NULL;
atomic_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_locks);
@@ -3453,11 +3505,16 @@ out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
if (retstp) {
- mutex_lock(&retstp->st_mutex);
- /* Not that we need to, just for neatness */
+ /* Handle races with CLOSE */
+ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+ nfs4_put_stid(&retstp->st_stid);
+ goto retry;
+ }
+ /* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
+ stp = retstp;
}
- return retstp;
+ return stp;
}
/*
@@ -3829,7 +3886,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
{
struct nfs4_stid *ret;
- ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
+ ret = find_stateid_by_type(cl, s,
+ NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
if (!ret)
return NULL;
return delegstateid(ret);
@@ -3852,6 +3910,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
if (deleg == NULL)
goto out;
+ if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
+ nfs4_put_stid(&deleg->dl_stid);
+ if (cl->cl_minorversion)
+ status = nfserr_deleg_revoked;
+ goto out;
+ }
flags = share_access_to_flags(open->op_share_access);
status = nfs4_check_delegmode(deleg, flags);
if (status) {
@@ -4253,9 +4317,9 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
struct nfs4_file *fp = NULL;
struct nfs4_ol_stateid *stp = NULL;
- struct nfs4_ol_stateid *swapstp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
+ bool new_stp = false;
/*
* Lookup file; if found, lookup stateid and check open request,
@@ -4267,9 +4331,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
- spin_lock(&fp->fi_lock);
- stp = nfsd4_find_existing_open(fp, open);
- spin_unlock(&fp->fi_lock);
+ stp = nfsd4_find_and_lock_existing_open(fp, open);
} else {
open->op_file = NULL;
status = nfserr_bad_stateid;
@@ -4277,41 +4339,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
goto out;
}
+ if (!stp) {
+ stp = init_open_stateid(fp, open);
+ if (!open->op_stp)
+ new_stp = true;
+ }
+
/*
* OPEN the file, or upgrade an existing OPEN.
* If truncate fails, the OPEN fails.
+ *
+ * stp is already locked.
*/
- if (stp) {
+ if (!new_stp) {
/* Stateid was found, this is an OPEN upgrade */
- mutex_lock(&stp->st_mutex);
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
- stp = open->op_stp;
- open->op_stp = NULL;
- /*
- * init_open_stateid() either returns a locked stateid
- * it found, or initializes and locks the new one we passed in
- */
- swapstp = init_open_stateid(stp, fp, open);
- if (swapstp) {
- nfs4_put_stid(&stp->st_stid);
- stp = swapstp;
- status = nfs4_upgrade_open(rqstp, fp, current_fh,
- stp, open);
- if (status) {
- mutex_unlock(&stp->st_mutex);
- goto out;
- }
- goto upgrade_out;
- }
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
if (status) {
- mutex_unlock(&stp->st_mutex);
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
goto out;
}
@@ -4320,7 +4372,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
if (stp->st_clnt_odstate == open->op_odstate)
open->op_odstate = NULL;
}
-upgrade_out:
+
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
@@ -4696,6 +4748,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
struct nfs4_stid **s, struct nfsd_net *nn)
{
__be32 status;
+ bool return_revoked = false;
+
+ /*
+ * only return revoked delegations if explicitly asked.
+ * otherwise we report revoked or bad_stateid status.
+ */
+ if (typemask & NFS4_REVOKED_DELEG_STID)
+ return_revoked = true;
+ else if (typemask & NFS4_DELEG_STID)
+ typemask |= NFS4_REVOKED_DELEG_STID;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return nfserr_bad_stateid;
@@ -4710,6 +4772,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
*s = find_stateid_by_type(cstate->clp, stateid, typemask);
if (!*s)
return nfserr_bad_stateid;
+ if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
+ nfs4_put_stid(*s);
+ if (cstate->minorversion)
+ return nfserr_deleg_revoked;
+ return nfserr_bad_stateid;
+ }
return nfs_ok;
}
@@ -5130,7 +5198,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
bool unhashed;
LIST_HEAD(reaplist);
- s->st_stid.sc_type = NFS4_CLOSED_STID;
spin_lock(&clp->cl_lock);
unhashed = unhash_open_stateid(s, &reaplist);
@@ -5169,10 +5236,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
+
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
- mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 2f27c935bd57..34c22fe4eca0 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1945,8 +1945,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
"failed to get inode block.\n");
return err;
}
- mark_buffer_dirty(ibh);
- nilfs_mdt_mark_dirty(ifile);
spin_lock(&nilfs->ns_inode_lock);
if (likely(!ii->i_bh))
ii->i_bh = ibh;
@@ -1955,6 +1953,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
goto retry;
}
+ // Always redirty the buffer to avoid race condition
+ mark_buffer_dirty(ii->i_bh);
+ nilfs_mdt_mark_dirty(ifile);
+
clear_bit(NILFS_I_QUEUED, &ii->i_state);
set_bit(NILFS_I_BUSY, &ii->i_state);
list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 86181d6526dc..93e6f029a322 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7270,13 +7270,24 @@ out:
static int ocfs2_trim_extent(struct super_block *sb,
struct ocfs2_group_desc *gd,
- u32 start, u32 count)
+ u64 group, u32 start, u32 count)
{
u64 discard, bcount;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
bcount = ocfs2_clusters_to_blocks(sb, count);
- discard = le64_to_cpu(gd->bg_blkno) +
- ocfs2_clusters_to_blocks(sb, start);
+ discard = ocfs2_clusters_to_blocks(sb, start);
+
+ /*
+ * For the first cluster group, the gd->bg_blkno is not at the start
+ * of the group, but at an offset from the start. If we add it while
+ * calculating discard for first group, we will wrongly start fstrim a
+ * few blocks after the desired start block and the range can cross
+ * over into the next cluster group. So, add it only if this is not
+ * the first cluster group.
+ */
+ if (group != osb->first_cluster_group_blkno)
+ discard += le64_to_cpu(gd->bg_blkno);
trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
@@ -7284,7 +7295,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
}
static int ocfs2_trim_group(struct super_block *sb,
- struct ocfs2_group_desc *gd,
+ struct ocfs2_group_desc *gd, u64 group,
u32 start, u32 max, u32 minbits)
{
int ret = 0, count = 0, next;
@@ -7303,7 +7314,7 @@ static int ocfs2_trim_group(struct super_block *sb,
next = ocfs2_find_next_bit(bitmap, max, start);
if ((next - start) >= minbits) {
- ret = ocfs2_trim_extent(sb, gd,
+ ret = ocfs2_trim_extent(sb, gd, group,
start, next - start);
if (ret < 0) {
mlog_errno(ret);
@@ -7401,7 +7412,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
gd = (struct ocfs2_group_desc *)gd_bh->b_data;
- cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
+ cnt = ocfs2_trim_group(sb, gd, group,
+ first_bit, last_bit, minlen);
brelse(gd_bh);
gd_bh = NULL;
if (cnt < 0) {
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 85eb8ebb5372..1d9ef8229cd6 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -596,7 +596,7 @@ static const char *sdcardfs_follow_link(struct dentry *dentry, void **cookie)
static int sdcardfs_permission_wrn(struct inode *inode, int mask)
{
- WARN_RATELIMIT(1, "sdcardfs does not support permission. Use permission2.\n");
+ pr_debug("sdcardfs does not support permission. Use permission2.\n");
return -EINVAL;
}
diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
index 13949259705a..0d4fe32b3ae2 100644
--- a/include/dt-bindings/pinctrl/omap.h
+++ b/include/dt-bindings/pinctrl/omap.h
@@ -45,8 +45,8 @@
#define PIN_OFF_NONE 0
#define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL)
#define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN)
-#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFF_PULL_EN | OFF_PULL_UP)
-#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFF_PULL_EN)
+#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP)
+#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN)
#define PIN_OFF_WAKEUPENABLE WAKEUP_EN
/*
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 89d9aa9e79bf..6fe974dbe741 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -234,12 +234,10 @@ static inline int block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
- if (err == -EFAULT)
+ if (err == -EFAULT || err == -EAGAIN)
return VM_FAULT_NOPAGE;
if (err == -ENOMEM)
return VM_FAULT_OOM;
- if (err == -EAGAIN)
- return VM_FAULT_RETRY;
/* -ENOSPC, -EDQUOT, -EIO ... */
return VM_FAULT_SIGBUS;
}
diff --git a/include/linux/clk/msm-clock-generic.h b/include/linux/clk/msm-clock-generic.h
index d7186a363a3f..fe019d366d0b 100644
--- a/include/linux/clk/msm-clock-generic.h
+++ b/include/linux/clk/msm-clock-generic.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -307,4 +307,16 @@ static inline struct mux_div_clk *to_mux_div_clk(struct clk *clk)
extern struct clk_ops clk_ops_mux_div_clk;
+/* ==================== Virtual clock ==================== */
+struct virtclk_front {
+ int id;
+ struct clk c;
+};
+
+extern struct clk_ops virtclk_front_ops;
+
+int msm_virtclk_front_probe(struct platform_device *pdev,
+ struct clk_lookup *table,
+ size_t size);
+
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 333d0ca6940f..516d83041206 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1292,7 +1292,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
unsigned long *load_addr,
unsigned long *load_size);
-efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_parse_options(char const *cmdline);
bool efi_runtime_disabled(void);
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index c2a975e4a711..fef1caeddf54 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -36,6 +36,8 @@
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
+#define F2FS_MAX_QUOTAS 3
+
#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
@@ -108,7 +110,8 @@ struct f2fs_super_block {
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
struct f2fs_device devs[MAX_DEVICES]; /* device list */
- __u8 reserved[327]; /* valid reserved region */
+ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
+ __u8 reserved[315]; /* valid reserved region */
} __packed;
/*
@@ -184,7 +187,8 @@ struct f2fs_extent {
} __packed;
#define F2FS_NAME_LEN 255
-#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
+/* 200 bytes for inline xattrs by default */
+#define DEFAULT_INLINE_XATTR_ADDRS 50
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
@@ -238,7 +242,7 @@ struct f2fs_inode {
union {
struct {
__le16 i_extra_isize; /* extra inode attribute size */
- __le16 i_padding; /* padding */
+ __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
__le32 i_projid; /* project id */
__le32 i_inode_checksum;/* inode meta checksum */
__le32 i_extra_end[0]; /* for attribute size calculation */
diff --git a/include/linux/input/qpnp-power-on.h b/include/linux/input/qpnp-power-on.h
index a2624ab57826..5944f0fd3414 100644
--- a/include/linux/input/qpnp-power-on.h
+++ b/include/linux/input/qpnp-power-on.h
@@ -51,6 +51,7 @@ enum pon_power_off_type {
};
enum pon_restart_reason {
+ /* 0 ~ 31 for common defined features */
PON_RESTART_REASON_UNKNOWN = 0x00,
PON_RESTART_REASON_RECOVERY = 0x01,
PON_RESTART_REASON_BOOTLOADER = 0x02,
@@ -58,6 +59,10 @@ enum pon_restart_reason {
PON_RESTART_REASON_DMVERITY_CORRUPTED = 0x04,
PON_RESTART_REASON_DMVERITY_ENFORCE = 0x05,
PON_RESTART_REASON_KEYS_CLEAR = 0x06,
+
+ /* 32 ~ 63 for OEMs/ODMs specific features */
+ PON_RESTART_REASON_OEM_MIN = 0x20,
+ PON_RESTART_REASON_OEM_MAX = 0x3f,
};
#ifdef CONFIG_INPUT_QPNP_POWER_ON
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 50220cab738c..05b63a1e9f84 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -53,6 +53,13 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+#define u64_to_user_ptr(x) ( \
+{ \
+ typecheck(u64, x); \
+ (void __user *)(uintptr_t)x; \
+} \
+)
+
/*
* This looks more complex than it should be. But we need to
* get the type for the ~ right in round_down (it needs to be
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d0b5e7bcadb..b4a5021fbbfa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -71,6 +71,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
+#ifndef lm_alias
+#define lm_alias(x) __va(__pa_symbol(x))
+#endif
+
/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2b1be7efde55..721bdb0226bd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -715,7 +715,8 @@ typedef struct pglist_data {
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
- unsigned long static_init_size;
+ /* Number of non-deferred pages */
+ unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
} pg_data_t;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0a306b431ece..c77de3b5f564 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3473,6 +3473,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+ const char *name);
+
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 639e9b8b0e4d..0b41959aab9f 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
@@ -153,6 +154,7 @@ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
struct netlink_dump_control {
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback *);
void *data;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index b64825d6ad26..5bc4b9d563a9 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -136,11 +136,7 @@ static inline const char *phy_modes(phy_interface_t interface)
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
-/*
- * Need to be a little smaller than phydev->dev.bus_id to leave room
- * for the ":%02x"
- */
-#define MII_BUS_ID_SIZE (20 - 3)
+#define MII_BUS_ID_SIZE 61
/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -599,7 +595,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[20];
+ char bus_id[MII_BUS_ID_SIZE + 3];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 75e4e30677f1..7eeceac52dea 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -65,19 +65,24 @@
/*
* Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
+ *
+ * in_irq() - We're in (hard) IRQ context
+ * in_softirq() - We have BH disabled, or are processing softirqs
+ * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
+ * in_serving_softirq() - We're in softirq context
+ * in_nmi() - We're in NMI context
+ * in_task() - We're in task context
+ *
+ * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
+ * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 128c4a8c9979..ca7c8041b894 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,7 +40,6 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_sync_hint_enable;
-extern unsigned int sysctl_sched_initial_task_util;
extern unsigned int sysctl_sched_cstate_aware;
#ifdef CONFIG_SCHED_HMP
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3f61c647fc5c..b5421f6f155a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3400,6 +3400,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
#endif
}
+static inline void ipvs_reset(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IP_VS)
+ skb->ipvs_property = 0;
+#endif
+}
+
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
new file mode 100644
index 000000000000..0f175b8f6456
--- /dev/null
+++ b/include/linux/tee_drv.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEE_DRV_H
+#define __TEE_DRV_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/tee.h>
+
+/*
+ * The file describes the API provided by the generic TEE driver to the
+ * specific TEE driver.
+ */
+
+#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
+#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
+
+struct tee_device;
+struct tee_shm;
+struct tee_shm_pool;
+
+/**
+ * struct tee_context - driver specific context on file pointer data
+ * @teedev: pointer to this drivers struct tee_device
+ * @list_shm: List of shared memory object owned by this context
+ * @data: driver specific context data, managed by the driver
+ */
+struct tee_context {
+ struct tee_device *teedev;
+ struct list_head list_shm;
+ void *data;
+};
+
+struct tee_param_memref {
+ size_t shm_offs;
+ size_t size;
+ struct tee_shm *shm;
+};
+
+struct tee_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+struct tee_param {
+ u64 attr;
+ union {
+ struct tee_param_memref memref;
+ struct tee_param_value value;
+ } u;
+};
+
+/**
+ * struct tee_driver_ops - driver operations vtable
+ * @get_version: returns version of driver
+ * @open: called when the device file is opened
+ * @release: release this open file
+ * @open_session: open a new session
+ * @close_session: close a session
+ * @invoke_func: invoke a trusted function
+ * @cancel_req: request cancel of an ongoing invoke or open
+ * @supp_recv: called for supplicant to get a command
+ * @supp_send: called for supplicant to send a response
+ */
+struct tee_driver_ops {
+ void (*get_version)(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers);
+ int (*open)(struct tee_context *ctx);
+ void (*release)(struct tee_context *ctx);
+ int (*open_session)(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+ int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+ int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
+ int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+ int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+};
+
+/**
+ * struct tee_desc - Describes the TEE driver to the subsystem
+ * @name: name of driver
+ * @ops: driver operations vtable
+ * @owner: module providing the driver
+ * @flags: Extra properties of driver, defined by TEE_DESC_* below
+ */
+#define TEE_DESC_PRIVILEGED 0x1
+struct tee_desc {
+ const char *name;
+ const struct tee_driver_ops *ops;
+ struct module *owner;
+ u32 flags;
+};
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data);
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() need to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev);
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev);
+
+/**
+ * struct tee_shm_pool_mem_info - holds information needed to create a shared
+ * memory pool
+ * @vaddr: Virtual address of start of pool
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ */
+struct tee_shm_pool_mem_info {
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+/**
+ * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
+ * memory range
+ * @priv_info: Information for driver private shared memory pool
+ * @dmabuf_info: Information for dma-buf shared memory pool
+ *
+ * Start and end of pools must be page aligned.
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_info, others will use the range provided by @priv_info.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *
+tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
+ struct tee_shm_pool_mem_info *dmabuf_info);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+void tee_shm_pool_free(struct tee_shm_pool *pool);
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev);
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
+ * TEE_SHM_DMA_BUF global shared memory will be allocated and associated
+ * with a dma-buf handle, else driver private memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm: Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm: Shared memory handle
+ * @va: Virtual address to translate
+ * @pa: Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm: Shared memory handle
+ * @pa: Physical address to translate
+ * @va: Returned virtual address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ * the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @pa: Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ * error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+int tee_shm_get_id(struct tee_shm *shm);
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+
+#endif /*__TEE_DRV_H*/
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index f0f1793cfa49..3a5af09af18b 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -50,13 +50,13 @@ struct tk_read_base {
* @tai_offset: The current UTC to TAI offset in seconds
* @clock_was_set_seq: The sequence number of clock was set events
* @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
- * @raw_time: Monotonic raw base time in timespec64 format
+ * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
* @cycle_interval: Number of clock cycles in one NTP interval
* @xtime_interval: Number of clock shifted nano seconds in one NTP
* interval.
* @xtime_remainder: Shifted nano seconds left over when rounding
* @cycle_interval
- * @raw_interval: Raw nano seconds accumulated per NTP interval.
+ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
* @ntp_error: Difference between accumulated time and NTP time in ntp
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -91,13 +91,13 @@ struct timekeeper {
s32 tai_offset;
unsigned int clock_was_set_seq;
ktime_t next_leap_ktime;
- struct timespec64 raw_time;
+ u64 raw_sec;
/* The following members are for timekeeping internal use */
cycle_t cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
- u32 raw_interval;
+ u64 raw_interval;
/* The ntp_tick_length() value currently being used.
* This cached copy ensures we consistently apply the tick
* length for an entire tick, as ntp_tick_length may change
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 83a505c749e1..1821d34c24a5 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -330,6 +330,7 @@ struct usb_host_bos {
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
struct usb_ss_container_id_descriptor *ss_id;
+ struct usb_ptm_cap_descriptor *ptm_cap;
struct usb_config_summary_descriptor *config_summary;
unsigned int num_config_summary_desc;
};
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 3a375d07d0dc..6670e9b34f20 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -82,6 +82,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
+#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/media/adv7481.h b/include/media/adv7481.h
index 80b8ee879ea4..fa5466197889 100644
--- a/include/media/adv7481.h
+++ b/include/media/adv7481.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#ifndef __ADV7481_H__
#define __ADV7481_H__
+#include <uapi/media/msm_ba.h>
/**
* adv7481_platform_data
* structure to pass board specific information to the ADV7481 driver
diff --git a/include/media/msm_ba.h b/include/media/msm_ba.h
index d630e441590f..4bab36ade468 100644
--- a/include/media/msm_ba.h
+++ b/include/media/msm_ba.h
@@ -35,6 +35,7 @@ enum msm_ba_ip {
BA_IP_HDMI_1,
BA_IP_MHL_1,
BA_IP_TTL,
+ BA_IP_TV_TUNER,
BA_IP_MAX = 0xffffffff
};
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 1b6b6dcb018d..43c0e771f417 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -114,6 +114,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
* @flags: flags
* @policy: attribute validation policy
* @doit: standard command callback
+ * @start: start callback for dumps
* @dumpit: callback for dumpers
* @done: completion callback for dumps
* @ops_list: operations list
@@ -122,6 +123,7 @@ struct genl_ops {
const struct nla_policy *policy;
int (*doit)(struct sk_buff *skb,
struct genl_info *info);
+ int (*start)(struct netlink_callback *cb);
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 625bdf95d673..95aa999f31d7 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -95,7 +95,7 @@ struct inet_request_sock {
kmemcheck_bitfield_end(flags);
u32 ir_mark;
union {
- struct ip_options_rcu *opt;
+ struct ip_options_rcu __rcu *ireq_opt;
struct sk_buff *pktopts;
};
};
@@ -113,6 +113,12 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
return sk->sk_mark;
}
+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+ return rcu_dereference_check(ireq->ireq_opt,
+ atomic_read(&ireq->req.rsk_refcnt) > 0);
+}
+
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 52402ab90c57..340b01dd8c37 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1629,12 +1629,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}
-/* Called when old skb is about to be deleted (to be combined with new skb) */
-static inline void tcp_highest_sack_combine(struct sock *sk,
+/* Called when old skb is about to be deleted and replaced by new skb */
+static inline void tcp_highest_sack_replace(struct sock *sk,
struct sk_buff *old,
struct sk_buff *new)
{
- if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
+ if (old == tcp_highest_sack(sk))
tcp_sk(sk)->highest_sack = new;
}
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 7915841b17ea..4fff429dc0b2 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -153,6 +153,7 @@ extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len);
extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len);
extern bool icnss_is_qmi_disable(struct device *dev);
extern bool icnss_is_fw_ready(void);
+extern bool icnss_is_fw_down(void);
extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len);
extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
extern int icnss_trigger_recovery(struct device *dev);
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 6078ef2e24de..ca2ceff39f2f 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -29,6 +29,108 @@ struct param_outband {
phys_addr_t paddr;
};
+/* --------- Common Structures and Definitions------------- */
+/* Instance ID Definitions */
+#define INSTANCE_ID_0 0x0000
+
+struct mem_mapping_hdr {
+ /*
+ * LSW of parameter data payload address. Supported values: any.
+ * - Must be set to zero for in-band data.
+ */
+ u32 data_payload_addr_lsw;
+
+ /*
+ * MSW of Parameter data payload address. Supported values: any.
+ * - Must be set to zero for in-band data.
+ * - In the case of 32 bit Shared memory address, msw field must be
+ * set to zero.
+ * - In the case of 36 bit shared memory address, bit 31 to bit 4 of
+ * msw must be set to zero.
+ */
+ u32 data_payload_addr_msw;
+
+ /*
+ * Memory map handle returned by DSP through
+ * ASM_CMD_SHARED_MEM_MAP_REGIONS command.
+ * Supported Values: Any.
+ * If mmhandle is NULL, the ParamData payloads are within the
+ * message payload (in-band).
+ * If mmhandle is non-NULL, the ParamData payloads begin at the
+ * address specified in the address msw and lsw (out-of-band).
+ */
+ u32 mem_map_handle;
+
+} __packed;
+
+/*
+ * Payload format for parameter data.
+ * Immediately following these structures are param_size bytes of parameter
+ * data.
+ */
+struct param_hdr_v1 {
+ /* Valid ID of the module. */
+ uint32_t module_id;
+
+ /* Valid ID of the parameter. */
+ uint32_t param_id;
+
+ /* The size of the parameter specified by the module/param ID combo */
+ uint16_t param_size;
+
+ /* This field must be set to zero. */
+ uint16_t reserved;
+} __packed;
+
+struct param_hdr_v2 {
+ /* Valid ID of the module. */
+ uint32_t module_id;
+
+ /* Valid ID of the parameter. */
+ uint32_t param_id;
+
+ /* The size of the parameter specified by the module/param ID combo */
+ uint32_t param_size;
+} __packed;
+
+struct param_hdr_v3 {
+ /* Valid ID of the module. */
+ uint32_t module_id;
+
+ /* Instance of the module. */
+ uint16_t instance_id;
+
+ /* This field must be set to zero. */
+ uint16_t reserved;
+
+ /* Valid ID of the parameter. */
+ uint32_t param_id;
+
+ /* The size of the parameter specified by the module/param ID combo */
+ uint32_t param_size;
+} __packed;
+
+/* A union of all param_hdr versions for versatility and max size */
+union param_hdrs {
+ struct param_hdr_v1 v1;
+ struct param_hdr_v2 v2;
+ struct param_hdr_v3 v3;
+};
+
+struct module_instance_info {
+ /* Module ID. */
+ u32 module_id;
+
+ /* Instance of the module */
+ u16 instance_id;
+
+ /* Reserved. This field must be set to zero. */
+ u16 reserved;
+} __packed;
+/* -------------------------------------------------------- */
+
+/* Begin service specific definitions and structures */
+
#define ADSP_ADM_VERSION 0x00070000
#define ADM_CMD_SHARED_MEM_MAP_REGIONS 0x00010322
@@ -399,70 +501,36 @@ struct adm_cmd_device_open_v6 {
/* Sets one or more parameters to a COPP.
*/
#define ADM_CMD_SET_PP_PARAMS_V5 0x00010328
+#define ADM_CMD_SET_PP_PARAMS_V6 0x0001035D
-/* Payload of the #ADM_CMD_SET_PP_PARAMS_V5 command.
- * If the data_payload_addr_lsw and data_payload_addr_msw element
- * are NULL, a series of adm_param_datastructures immediately
- * follows, whose total size is data_payload_size bytes.
- */
-struct adm_cmd_set_pp_params_v5 {
- struct apr_hdr hdr;
- u32 payload_addr_lsw;
- /* LSW of parameter data payload address.*/
- u32 payload_addr_msw;
- /* MSW of parameter data payload address.*/
-
- u32 mem_map_handle;
-/* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS
- * command */
-/* If mem_map_handle is zero implies the message is in
- * the payload */
-
- u32 payload_size;
-/* Size in bytes of the variable payload accompanying this
- * message or
- * in shared memory. This is used for parsing the parameter
- * payload.
- */
-} __packed;
-
-/* Payload format for COPP parameter data.
- * Immediately following this structure are param_size bytes
- * of parameter
- * data.
+/*
+ * Structure of the ADM Set PP Params command. Parameter data must be
+ * pre-packed with correct header for either V2 or V3 when sent in-band.
+ * Use q6core_pack_pp_params to pack the header and data correctly depending on
+ * Instance ID support.
*/
-struct adm_param_data_v5 {
- u32 module_id;
- /* Unique ID of the module. */
- u32 param_id;
- /* Unique ID of the parameter. */
- u16 param_size;
- /* Data size of the param_id/module_id combination.
- This value is a
- multiple of 4 bytes. */
- u16 reserved;
- /* Reserved for future enhancements.
- * This field must be set to zero.
- */
-} __packed;
+struct adm_cmd_set_pp_params {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
+ /* The memory mapping header to be used when sending out of band */
+ struct mem_mapping_hdr mem_hdr;
-struct param_data_v6 {
- /* Unique ID of the module. */
- u32 module_id;
- /* Unique ID of the instance. */
- u16 instance_id;
- /* Reserved for future enhancements.
- * This field must be set to zero.
+ /* Size in bytes of the variable payload accompanying this
+ * message or
+ * in shared memory. This is used for parsing the parameter
+ * payload.
*/
- u16 reserved;
- /* Unique ID of the parameter. */
- u32 param_id;
- /* Data size of the param_id/module_id combination.
- * This value is a
- * multiple of 4 bytes.
+ u32 payload_size;
+
+ /* Parameter data for in band payload. This should be structured as the
+ * parameter header immediately followed by the parameter data. Multiple
+ * parameters can be set in one command by repeating the header followed
+ * by the data for as many parameters as need to be set.
+ * Use q6core_pack_pp_params to pack the header and data correctly
+ * depending on Instance ID support.
*/
- u32 param_size;
+ u8 param_data[0];
} __packed;
/* ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command is used to set
@@ -480,7 +548,7 @@ struct param_data_v6 {
/* Payload of the #define ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command.
* If the data_payload_addr_lsw and data_payload_addr_msw element
- * are NULL, a series of struct param_data_v6 structures immediately
+ * are NULL, a series of struct param_hdr_v3 structures immediately
* follows, whose total size is payload_size bytes.
*/
struct adm_cmd_set_mtmx_params_v1 {
@@ -517,7 +585,7 @@ struct enable_param_v6 {
* This parameter is generic/common parameter to configure or
* determine the state of any audio processing module.
*/
- struct param_data_v6 param;
+ struct param_hdr_v3 param;
/* @values 0 : Disable 1: Enable */
uint32_t enable;
@@ -570,25 +638,6 @@ struct adm_cmd_set_pspd_mtmx_strtr_params_v5 {
u16 reserved;
} __packed;
-/* Defined specifically for in-band use, includes params */
-struct adm_cmd_set_pp_params_inband_v5 {
- struct apr_hdr hdr;
- /* LSW of parameter data payload address.*/
- u32 payload_addr_lsw;
- /* MSW of parameter data payload address.*/
- u32 payload_addr_msw;
- /* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS */
- /* command. If mem_map_handle is zero implies the message is in */
- /* the payload */
- u32 mem_map_handle;
- /* Size in bytes of the variable payload accompanying this */
- /* message or in shared memory. This is used for parsing the */
- /* parameter payload. */
- u32 payload_size;
- /* Parameters passed for in band payload */
- struct adm_param_data_v5 params;
-} __packed;
-
/* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V5 command.
*/
#define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329
@@ -621,44 +670,21 @@ struct adm_cmd_rsp_device_open_v5 {
/* This command allows a query of one COPP parameter.
*/
#define ADM_CMD_GET_PP_PARAMS_V5 0x0001032A
+#define ADM_CMD_GET_PP_PARAMS_V6 0x0001035E
-/* Payload an #ADM_CMD_GET_PP_PARAMS_V5 command.
-*/
-struct adm_cmd_get_pp_params_v5 {
- struct apr_hdr hdr;
- u32 data_payload_addr_lsw;
- /* LSW of parameter data payload address.*/
-
- u32 data_payload_addr_msw;
- /* MSW of parameter data payload address.*/
-
- /* If the mem_map_handle is non zero,
- * on ACK, the ParamData payloads begin at
- * the address specified (out-of-band).
- */
-
- u32 mem_map_handle;
- /* Memory map handle returned
- * by ADM_CMD_SHARED_MEM_MAP_REGIONS command.
- * If the mem_map_handle is 0, it implies that
- * the ACK's payload will contain the ParamData (in-band).
- */
-
- u32 module_id;
- /* Unique ID of the module. */
+/*
+ * Structure of the ADM Get PP Params command. Parameter header must be
+ * packed correctly for either V2 or V3. Use q6core_pack_pp_params to pack the
+ * header correctly depending on Instance ID support.
+ */
+struct adm_cmd_get_pp_params {
+ struct apr_hdr apr_hdr;
- u32 param_id;
- /* Unique ID of the parameter. */
+ /* The memory mapping header to be used when requesting outband */
+ struct mem_mapping_hdr mem_hdr;
- u16 param_max_size;
- /* Maximum data size of the parameter
- *ID/module ID combination. This
- * field is a multiple of 4 bytes.
- */
- u16 reserved;
- /* Reserved for future enhancements.
- * This field must be set to zero.
- */
+ /* Parameter header for in band payload. */
+ union param_hdrs param_hdr;
} __packed;
/* Returns parameter values
@@ -670,15 +696,48 @@ struct adm_cmd_get_pp_params_v5 {
* which returns parameter values in response
* to an #ADM_CMD_GET_PP_PARAMS_V5 command.
* Immediately following this
- * structure is the adm_param_data_v5
+ * structure is the param_hdr_v1
* structure containing the pre/postprocessing
* parameter data. For an in-band
* scenario, the variable payload depends
* on the size of the parameter.
*/
struct adm_cmd_rsp_get_pp_params_v5 {
- u32 status;
/* Status message (error code).*/
+ u32 status;
+
+ /* The header that identifies the subsequent parameter data */
+ struct param_hdr_v1 param_hdr;
+
+ /* The parameter data returned */
+ u32 param_data[0];
+} __packed;
+
+/*
+ * Returns parameter values in response to an #ADM_CMD_GET_PP_PARAMS_V5/6
+ * command.
+ */
+#define ADM_CMDRSP_GET_PP_PARAMS_V6 0x0001035F
+
+/* Payload of the #ADM_CMDRSP_GET_PP_PARAMS_V6 message,
+ * which returns parameter values in response
+ * to an #ADM_CMD_GET_PP_PARAMS_V6 command.
+ * Immediately following this
+ * structure is the param_hdr_v3
+ * structure containing the pre/postprocessing
+ * parameter data. For an in-band
+ * scenario, the variable payload depends
+ * on the size of the parameter.
+*/
+struct adm_cmd_rsp_get_pp_params_v6 {
+ /* Status message (error code).*/
+ u32 status;
+
+ /* The header that identifies the subsequent parameter data */
+ struct param_hdr_v3 param_hdr;
+
+ /* The parameter data returned */
+ u32 param_data[0];
} __packed;
/* Structure for holding soft stepping volume parameters. */
@@ -731,9 +790,29 @@ struct adm_pspd_param_data_t {
uint16_t reserved;
} __packed;
-struct audproc_mfc_output_media_fmt {
- struct adm_cmd_set_pp_params_v5 params;
- struct adm_param_data_v5 data;
+struct adm_cmd_set_pp_params_v5 {
+ struct apr_hdr hdr;
+ u32 payload_addr_lsw;
+ /* LSW of parameter data payload address.*/
+ u32 payload_addr_msw;
+ /* MSW of parameter data payload address.*/
+
+ u32 mem_map_handle;
+ /* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS
+ * command.
+ * If mem_map_handle is zero implies the message is in
+ * the payload
+ */
+
+ u32 payload_size;
+ /* Size in bytes of the variable payload accompanying this
+ * message or
+ * in shared memory. This is used for parsing the parameter
+ * payload.
+ */
+} __packed;
+
+struct audproc_mfc_param_media_fmt {
uint32_t sampling_rate;
uint16_t bits_per_sample;
uint16_t num_channels;
@@ -741,8 +820,6 @@ struct audproc_mfc_output_media_fmt {
} __packed;
struct audproc_volume_ctrl_master_gain {
- struct adm_cmd_set_pp_params_v5 params;
- struct adm_param_data_v5 data;
/* Linear gain in Q13 format. */
uint16_t master_gain;
/* Clients must set this field to zero. */
@@ -750,8 +827,6 @@ struct audproc_volume_ctrl_master_gain {
} __packed;
struct audproc_soft_step_volume_params {
- struct adm_cmd_set_pp_params_v5 params;
- struct adm_param_data_v5 data;
/*
* Period in milliseconds.
* Supported values: 0 to 15000
@@ -773,7 +848,6 @@ struct audproc_soft_step_volume_params {
} __packed;
struct audproc_enable_param_t {
- struct adm_cmd_set_pp_params_inband_v5 pp_params;
/*
* Specifies whether the Audio processing module is enabled.
* This parameter is generic/common parameter to configure or
@@ -1497,87 +1571,136 @@ struct afe_sidetone_iir_filter_config_params {
#define AFE_MODULE_LOOPBACK 0x00010205
#define AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH 0x00010206
-/* Payload of the #AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH parameter,
- * which gets/sets loopback gain of a port to an Rx port.
- * The Tx port ID of the loopback is part of the set_param command.
- */
+/* Used by RTAC */
+struct afe_rtac_user_data_set_v2 {
+ /* Port interface and direction (Rx or Tx) to start. */
+ u16 port_id;
-/* Payload of the #AFE_PORT_CMD_SET_PARAM_V2 command's
- * configuration/calibration settings for the AFE port.
- */
-struct afe_port_cmd_set_param_v2 {
+ /* Actual size of the payload in bytes.
+ * This is used for parsing the parameter payload.
+ * Supported values: > 0
+ */
+ u16 payload_size;
+
+ /* The header detailing the memory mapping for out of band. */
+ struct mem_mapping_hdr mem_hdr;
+
+ /* The parameter header for the parameter data to set */
+ struct param_hdr_v1 param_hdr;
+
+ /* The parameter data to be filled when sent inband */
+ u32 *param_data;
+} __packed;
+
+struct afe_rtac_user_data_set_v3 {
+ /* Port interface and direction (Rx or Tx) to start. */
u16 port_id;
-/* Port interface and direction (Rx or Tx) to start.
- */
+ /* Reserved for future enhancements. Must be 0. */
+ u16 reserved;
+
+ /* The header detailing the memory mapping for out of band. */
+ struct mem_mapping_hdr mem_hdr;
+ /* The size of the parameter header and parameter data */
+ u32 payload_size;
+
+ /* The parameter header for the parameter data to set */
+ struct param_hdr_v3 param_hdr;
+
+ /* The parameter data to be filled when sent inband */
+ u32 *param_data;
+} __packed;
+
+struct afe_rtac_user_data_get_v2 {
+ /* Port interface and direction (Rx or Tx) to start. */
+ u16 port_id;
+
+ /* Actual size of the payload in bytes.
+ * This is used for parsing the parameter payload.
+ * Supported values: > 0
+ */
u16 payload_size;
-/* Actual size of the payload in bytes.
- * This is used for parsing the parameter payload.
- * Supported values: > 0
- */
-u32 payload_address_lsw;
-/* LSW of 64 bit Payload address.
- * Address should be 32-byte,
- * 4kbyte aligned and must be contiguous memory.
- */
+ /* The header detailing the memory mapping for out of band. */
+ struct mem_mapping_hdr mem_hdr;
-u32 payload_address_msw;
-/* MSW of 64 bit Payload address.
- * In case of 32-bit shared memory address,
- * this field must be set to zero.
- * In case of 36-bit shared memory address,
- * bit-4 to bit-31 must be set to zero.
- * Address should be 32-byte, 4kbyte aligned
- * and must be contiguous memory.
- */
+ /* The module ID of the parameter to get */
+ u32 module_id;
-u32 mem_map_handle;
-/* Memory map handle returned by
- * AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS commands.
- * Supported Values:
- * - NULL -- Message. The parameter data is in-band.
- * - Non-NULL -- The parameter data is Out-band.Pointer to
- * the physical address
- * in shared memory of the payload data.
- * An optional field is available if parameter
- * data is in-band:
- * afe_param_data_v2 param_data[...].
- * For detailed payload content, see the
- * afe_port_param_data_v2 structure.
- */
+ /* The parameter ID of the parameter to get */
+ u32 param_id;
+
+ /* The parameter data to be filled when sent inband */
+ struct param_hdr_v1 param_hdr;
} __packed;
+struct afe_rtac_user_data_get_v3 {
+ /* Port interface and direction (Rx or Tx) to start. */
+ u16 port_id;
+ /* Reserved for future enhancements. Must be 0. */
+ u16 reserved;
+
+ /* The header detailing the memory mapping for out of band. */
+ struct mem_mapping_hdr mem_hdr;
+
+ /* The parameter data to be filled when sent inband */
+ struct param_hdr_v3 param_hdr;
+} __packed;
#define AFE_PORT_CMD_SET_PARAM_V2 0x000100EF
+struct afe_port_cmd_set_param_v2 {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
-struct afe_port_param_data_v2 {
- u32 module_id;
-/* ID of the module to be configured.
- * Supported values: Valid module ID
- */
+ /* Port interface and direction (Rx or Tx) to start. */
+ u16 port_id;
-u32 param_id;
-/* ID of the parameter corresponding to the supported parameters
- * for the module ID.
- * Supported values: Valid parameter ID
- */
+ /*
+ * Actual size of the payload in bytes.
+ * This is used for parsing the parameter payload.
+ * Supported values: > 0
+ */
+ u16 payload_size;
-u16 param_size;
-/* Actual size of the data for the
- * module_id/param_id pair. The size is a
- * multiple of four bytes.
- * Supported values: > 0
- */
+ /* The header detailing the memory mapping for out of band. */
+ struct mem_mapping_hdr mem_hdr;
-u16 reserved;
-/* This field must be set to zero.
- */
+ /* The parameter data to be filled when sent inband */
+ u8 param_data[0];
} __packed;
+#define AFE_PORT_CMD_SET_PARAM_V3 0x000100FA
+struct afe_port_cmd_set_param_v3 {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
+
+ /* Port ID of the AFE port to configure. Port interface and direction
+ * (Rx or Tx) to configure. An even number represents the Rx direction,
+ * and an odd number represents the Tx direction.
+ */
+ u16 port_id;
+
+ /* Reserved. This field must be set to zero. */
+ u16 reserved;
+
+ /* The memory mapping header to be used when sending outband */
+ struct mem_mapping_hdr mem_hdr;
+
+ /* The total size of the payload, including param_hdr_v3 */
+ u32 payload_size;
+
+ /*
+ * The parameter data to be filled when sent inband.
+ * Must include param_hdr packed correctly.
+ */
+ u8 param_data[0];
+} __packed;
+
+/* Payload of the #AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH parameter,
+ * which gets/sets loopback gain of a port to an Rx port.
+ * The Tx port ID of the loopback is part of the set_param command.
+ */
+
struct afe_loopback_gain_per_path_param {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
u16 rx_port_id;
/* Rx port of the loopback. */
@@ -1613,9 +1736,6 @@ enum afe_loopback_routing_mode {
* which enables/disables one AFE loopback.
*/
struct afe_loopback_cfg_v1 {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
u32 loopback_cfg_minor_version;
/* Minor version used for tracking the version of the RMC module
* configuration interface.
@@ -1677,19 +1797,19 @@ struct loopback_cfg_data {
struct afe_st_loopback_cfg_v1 {
struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 gain_pdata;
+ struct mem_mapping_hdr mem_hdr;
+ struct param_hdr_v1 gain_pdata;
struct afe_loopback_sidetone_gain gain_data;
- struct afe_port_param_data_v2 cfg_pdata;
+ struct param_hdr_v1 cfg_pdata;
struct loopback_cfg_data cfg_data;
} __packed;
struct afe_loopback_iir_cfg_v2 {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 st_iir_enable_pdata;
- struct afe_mod_enable_param st_iir_mode_enable_data;
- struct afe_port_param_data_v2 st_iir_filter_config_pdata;
+ struct apr_hdr hdr;
+ struct mem_mapping_hdr param;
+ struct param_hdr_v1 st_iir_enable_pdata;
+ struct afe_mod_enable_param st_iir_mode_enable_data;
+ struct param_hdr_v1 st_iir_filter_config_pdata;
struct afe_sidetone_iir_filter_config_params st_iir_filter_config_data;
} __packed;
#define AFE_MODULE_SPEAKER_PROTECTION 0x00010209
@@ -2141,20 +2261,6 @@ struct afe_param_id_spdif_clk_cfg {
*/
} __packed;
-struct afe_spdif_clk_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_spdif_clk_cfg clk_cfg;
-} __packed;
-
-struct afe_spdif_chstatus_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_spdif_ch_status_cfg ch_status;
-} __packed;
-
struct afe_spdif_port_config {
struct afe_param_id_spdif_cfg cfg;
struct afe_param_id_spdif_ch_status_cfg ch_status;
@@ -2680,16 +2786,6 @@ struct afe_param_id_usb_audio_cfg {
u32 endian;
} __packed;
-struct afe_usb_audio_dev_param_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- union {
- struct afe_param_id_usb_audio_dev_params usb_dev;
- struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt;
- };
-} __packed;
-
/*
* This param id is used to configure Real Time Proxy interface.
*/
@@ -3084,20 +3180,6 @@ struct afe_param_id_custom_tdm_header_cfg {
uint16_t header7; Reserved Info[3] - Bitrate[kbps] - Low Byte -> 0x0 */
} __packed;
-struct afe_slot_mapping_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_slot_mapping_cfg slot_mapping;
-} __packed;
-
-struct afe_custom_tdm_header_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_custom_tdm_header_cfg custom_tdm_header;
-} __packed;
-
struct afe_tdm_port_config {
struct afe_param_id_tdm_cfg tdm;
struct afe_param_id_slot_mapping_cfg slot_mapping;
@@ -3474,18 +3556,6 @@ union afe_port_config {
struct avs_enc_packetizer_id_param_t enc_pkt_id_param;
} __packed;
-struct afe_audioif_config_command_no_payload {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
-} __packed;
-
-struct afe_audioif_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- union afe_port_config port;
-} __packed;
-
#define AFE_PORT_CMD_DEVICE_START 0x000100E5
/* Payload of the #AFE_PORT_CMD_DEVICE_START.*/
@@ -3648,13 +3718,8 @@ u32 mem_map_handle;
*/
} __packed;
-#define AFE_PORT_CMD_GET_PARAM_V2 0x000100F0
-
-/* Payload of the #AFE_PORT_CMD_GET_PARAM_V2 command,
- * which queries for one post/preprocessing parameter of a
- * stream.
- */
-struct afe_port_cmd_get_param_v2 {
+/* Used by RTAC */
+struct afe_rtac_get_param_v2 {
u16 port_id;
/* Port interface and direction (Rx or Tx) to start. */
@@ -3700,6 +3765,37 @@ struct afe_port_cmd_get_param_v2 {
*/
} __packed;
+#define AFE_PORT_CMD_GET_PARAM_V2 0x000100F0
+
+/* Payload of the #AFE_PORT_CMD_GET_PARAM_V2 command,
+ * which queries for one post/preprocessing parameter of a
+ * stream.
+ */
+struct afe_port_cmd_get_param_v2 {
+ struct apr_hdr apr_hdr;
+
+ /* Port interface and direction (Rx or Tx) to start. */
+ u16 port_id;
+
+ /* Maximum data size of the parameter ID/module ID combination.
+ * This is a multiple of four bytes
+ * Supported values: > 0
+ */
+ u16 payload_size;
+
+ /* The memory mapping header to be used when requesting outband */
+ struct mem_mapping_hdr mem_hdr;
+
+ /* The module ID of the parameter data requested */
+ u32 module_id;
+
+ /* The parameter ID of the parameter data requested */
+ u32 param_id;
+
+ /* The header information for the parameter data */
+ struct param_hdr_v1 param_hdr;
+} __packed;
+
#define AFE_PORT_CMDRSP_GET_PARAM_V2 0x00010106
/* Payload of the #AFE_PORT_CMDRSP_GET_PARAM_V2 message, which
@@ -3715,6 +3811,41 @@ struct afe_port_cmd_get_param_v2 {
struct afe_port_cmdrsp_get_param_v2 {
u32 status;
+ struct param_hdr_v1 param_hdr;
+ u8 param_data[0];
+} __packed;
+
+#define AFE_PORT_CMD_GET_PARAM_V3 0x000100FB
+struct afe_port_cmd_get_param_v3 {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
+
+ /* Port ID of the AFE port to configure. Port interface and direction
+ * (Rx or Tx) to configure. An even number represents the Rx direction,
+ * and an odd number represents the Tx direction.
+ */
+ u16 port_id;
+
+ /* Reserved. This field must be set to zero. */
+ u16 reserved;
+
+ /* The memory mapping header to be used when requesting outband */
+ struct mem_mapping_hdr mem_hdr;
+
+ /* The header information for the parameter data */
+ struct param_hdr_v3 param_hdr;
+} __packed;
+
+#define AFE_PORT_CMDRSP_GET_PARAM_V3 0x00010108
+struct afe_port_cmdrsp_get_param_v3 {
+ /* The status of the command */
+ uint32_t status;
+
+ /* The header information for the parameter data */
+ struct param_hdr_v3 param_hdr;
+
+ /* The parameter data to be filled when sent inband */
+ u8 param_data[0];
} __packed;
#define AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG 0x0001028C
@@ -3736,13 +3867,6 @@ struct afe_param_id_lpass_core_shared_clk_cfg {
*/
} __packed;
-struct afe_lpass_core_shared_clk_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_lpass_core_shared_clk_cfg clk_cfg;
-} __packed;
-
/* adsp_afe_service_commands.h */
#define ADSP_MEMORY_MAP_EBI_POOL 0
@@ -6382,59 +6506,33 @@ struct asm_stream_cmd_open_transcode_loopback_t {
#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
#define ASM_STREAM_CMD_SET_PP_PARAMS_V2 0x00010DA1
+#define ASM_STREAM_CMD_SET_PP_PARAMS_V3 0x0001320D
-struct asm_stream_cmd_set_pp_params_v2 {
- u32 data_payload_addr_lsw;
-/* LSW of parameter data payload address. Supported values: any. */
- u32 data_payload_addr_msw;
-/* MSW of Parameter data payload address. Supported values: any.
- * - Must be set to zero for in-band data.
- * - In the case of 32 bit Shared memory address, msw field must be
- * - set to zero.
- * - In the case of 36 bit shared memory address, bit 31 to bit 4 of
- * msw
- *
- * - must be set to zero.
+/*
+ * Structure for the ASM Stream Set PP Params command. Parameter data must be
+ * pre-packed with the correct header for either V2 or V3 when sent in-band.
+ * Use q6core_pack_pp_params to pack the header and data correctly depending on
+ * Instance ID support.
*/
- u32 mem_map_handle;
-/* Supported Values: Any.
-* memory map handle returned by DSP through
-* ASM_CMD_SHARED_MEM_MAP_REGIONS
-* command.
-* if mmhandle is NULL, the ParamData payloads are within the
-* message payload (in-band).
-* If mmhandle is non-NULL, the ParamData payloads begin at the
-* address specified in the address msw and lsw (out-of-band).
-*/
+struct asm_stream_cmd_set_pp_params {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
- u32 data_payload_size;
-/* Size in bytes of the variable payload accompanying the
-message, or in shared memory. This field is used for parsing the
-parameter payload. */
-
-} __packed;
+ /* The memory mapping header to be used when sending out of band */
+ struct mem_mapping_hdr mem_hdr;
+ /* The total size of the payload, including the parameter header */
+ u32 payload_size;
-struct asm_stream_param_data_v2 {
- u32 module_id;
- /* Unique module ID. */
-
- u32 param_id;
- /* Unique parameter ID. */
-
- u16 param_size;
-/* Data size of the param_id/module_id combination. This is
- * a multiple of 4 bytes.
- */
-
- u16 reserved;
-/* Reserved for future enhancements. This field must be set to
- * zero.
- */
-
+ /* The parameter data to be filled when sent inband. Parameter data
+ * must be pre-packed with parameter header and then copied here. Use
+ * q6core_pack_pp_params to pack the header and param data correctly.
+ */
+ u32 param_data[0];
} __packed;
#define ASM_STREAM_CMD_GET_PP_PARAMS_V2 0x00010DA2
+#define ASM_STREAM_CMD_GET_PP_PARAMS_V3 0x0001320E
struct asm_stream_cmd_get_pp_params_v2 {
u32 data_payload_addr_lsw;
@@ -6613,6 +6711,7 @@ struct asm_aac_dual_mono_mapping_param {
} __packed;
#define ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 0x00010DA4
+#define ASM_STREAM_CMDRSP_GET_PP_PARAMS_V3 0x0001320F
struct asm_stream_cmdrsp_get_pp_params_v2 {
u32 status;
@@ -7388,12 +7487,6 @@ struct admx_mic_gain {
/*< Clients must set this field to zero. */
} __packed;
-struct adm_set_mic_gain_params {
- struct adm_cmd_set_pp_params_v5 params;
- struct adm_param_data_v5 data;
- struct admx_mic_gain mic_gain_data;
-} __packed;
-
/* end_addtogroup audio_pp_param_ids */
/* @ingroup audio_pp_module_ids
@@ -7749,56 +7842,23 @@ struct adm_qensemble_param_set_new_angle {
#define ADM_CMD_GET_PP_TOPO_MODULE_LIST 0x00010349
#define ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST 0x00010350
+#define ADM_CMD_GET_PP_TOPO_MODULE_LIST_V2 0x00010360
+#define ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST_V2 0x00010361
#define AUDPROC_PARAM_ID_ENABLE 0x00010904
- /*
- * Payload of the ADM_CMD_GET_PP_TOPO_MODULE_LIST command.
- */
-struct adm_cmd_get_pp_topo_module_list_t {
- struct apr_hdr hdr;
- /* Lower 32 bits of the 64-bit parameter data payload address. */
- uint32_t data_payload_addr_lsw;
- /*
- * Upper 32 bits of the 64-bit parameter data payload address.
- *
- *
- * The size of the shared memory, if specified, must be large enough to
- * contain the entire parameter data payload, including the module ID,
- * parameter ID, parameter size, and parameter values.
- */
- uint32_t data_payload_addr_msw;
- /*
- * Unique identifier for an address.
- *
- * This memory map handle is returned by the aDSP through the
- * #ADM_CMD_SHARED_MEM_MAP_REGIONS command.
- *
- * @values
- * - Non-NULL -- On acknowledgment, the parameter data payloads begin at
- * the address specified (out-of-band)
- * - NULL -- The acknowledgment's payload contains the parameter data
- * (in-band) @tablebulletend
- */
- uint32_t mem_map_handle;
+/*
+ * Payload of the ADM_CMD_GET_PP_TOPO_MODULE_LIST command.
+ */
+struct adm_cmd_get_pp_topo_module_list {
+ struct apr_hdr apr_hdr;
+
+ /* The memory mapping header to be used when requesting out of band */
+ struct mem_mapping_hdr mem_hdr;
+
/*
* Maximum data size of the list of modules. This
* field is a multiple of 4 bytes.
*/
- uint16_t param_max_size;
- /* This field must be set to zero. */
- uint16_t reserved;
-} __packed;
-
-/*
- * Payload of the ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST message, which returns
- * module ids in response to an ADM_CMD_GET_PP_TOPO_MODULE_LIST command.
- * Immediately following this structure is the acknowledgement <b>module id
- * data variable payload</b> containing the pre/postprocessing module id
- * values. For an in-band scenario, the variable payload depends on the size
- * of the parameter.
- */
-struct adm_cmd_rsp_get_pp_topo_module_list_t {
- /* Status message (error code). */
- uint32_t status;
+ uint32_t param_max_size;
} __packed;
struct audproc_topology_module_id_info_t {
@@ -7891,9 +7951,6 @@ struct audproc_topology_module_id_info_t {
struct asm_volume_ctrl_master_gain {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
uint16_t master_gain;
/*< Linear gain in Q13 format. */
@@ -7904,10 +7961,6 @@ struct asm_volume_ctrl_master_gain {
struct asm_volume_ctrl_lr_chan_gain {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
-
uint16_t l_chan_gain;
/*< Linear gain in Q13 format for the left channel. */
@@ -7919,6 +7972,7 @@ struct audproc_chmixer_param_coeff {
uint32_t index;
uint16_t num_output_channels;
uint16_t num_input_channels;
+ uint32_t payload[0];
} __packed;
@@ -7947,6 +8001,7 @@ struct audproc_volume_ctrl_channel_type_gain_pair {
/* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_MUTE parameters used by
* the Volume Control module.
*/
+#define ASM_MAX_CHANNELS 8
struct audproc_volume_ctrl_multichannel_gain {
uint32_t num_channels;
/* Number of channels for which mute configuration is provided. Any
@@ -7954,7 +8009,8 @@ struct audproc_volume_ctrl_multichannel_gain {
* provided are set to unmute.
*/
- struct audproc_volume_ctrl_channel_type_gain_pair *gain_data;
+ struct audproc_volume_ctrl_channel_type_gain_pair
+ gain_data[ASM_MAX_CHANNELS];
/* Array of channel type/mute setting pairs. */
} __packed;
@@ -7968,9 +8024,6 @@ struct audproc_volume_ctrl_multichannel_gain {
struct asm_volume_ctrl_mute_config {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
uint32_t mute_flag;
/*< Specifies whether mute is disabled (0) or enabled (nonzero).*/
@@ -7998,9 +8051,6 @@ struct asm_volume_ctrl_mute_config {
* parameters used by the Volume Control module.
*/
struct asm_soft_step_volume_params {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
uint32_t period;
/*< Period in milliseconds.
* Supported values: 0 to 15000
@@ -8030,9 +8080,6 @@ struct asm_soft_step_volume_params {
struct asm_soft_pause_params {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
uint32_t enable_flag;
/*< Specifies whether soft pause is disabled (0) or enabled
* (nonzero).
@@ -8122,10 +8169,7 @@ struct asm_volume_ctrl_channeltype_gain_pair {
struct asm_volume_ctrl_multichannel_gain {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
- uint32_t num_channels;
+ uint32_t num_channels;
/*
* Number of channels for which gain values are provided. Any
* channels present in the data for which gain is not provided are
@@ -8150,9 +8194,6 @@ struct asm_volume_ctrl_multichannel_gain {
struct asm_volume_ctrl_channelype_mute_pair {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
uint8_t channelype;
/*< Channel type for which the mute setting is to be applied.
* Supported values:
@@ -8201,9 +8242,6 @@ struct asm_volume_ctrl_channelype_mute_pair {
struct asm_volume_ctrl_multichannel_mute {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
uint32_t num_channels;
/*< Number of channels for which mute configuration is
* provided. Any channels present in the data for which mute
@@ -8648,9 +8686,6 @@ struct asm_eq_per_band_params {
} __packed;
struct asm_eq_params {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
uint32_t enable_flag;
/*< Specifies whether the equalizer module is disabled (0) or enabled
* (nonzero).
@@ -8689,6 +8724,9 @@ struct asm_eq_params {
#define VSS_ICOMMON_CMD_SET_PARAM_V2 0x0001133D
#define VSS_ICOMMON_CMD_GET_PARAM_V2 0x0001133E
#define VSS_ICOMMON_RSP_GET_PARAM 0x00011008
+#define VSS_ICOMMON_CMD_SET_PARAM_V3 0x00013245
+#define VSS_ICOMMON_CMD_GET_PARAM_V3 0x00013246
+#define VSS_ICOMMON_RSP_GET_PARAM_V3 0x00013247
/** ID of the Bass Boost module.
This module supports the following parameter IDs:
@@ -9072,15 +9110,13 @@ struct afe_sp_th_vi_ftm_params {
} __packed;
struct afe_sp_th_vi_get_param {
- struct apr_hdr hdr;
- struct afe_port_cmd_get_param_v2 get_param;
- struct afe_port_param_data_v2 pdata;
+ struct param_hdr_v3 pdata;
struct afe_sp_th_vi_ftm_params param;
} __packed;
struct afe_sp_th_vi_get_param_resp {
uint32_t status;
- struct afe_port_param_data_v2 pdata;
+ struct param_hdr_v3 pdata;
struct afe_sp_th_vi_ftm_params param;
} __packed;
@@ -9146,15 +9182,13 @@ struct afe_sp_ex_vi_ftm_params {
} __packed;
struct afe_sp_ex_vi_get_param {
- struct apr_hdr hdr;
- struct afe_port_cmd_get_param_v2 get_param;
- struct afe_port_param_data_v2 pdata;
+ struct param_hdr_v3 pdata;
struct afe_sp_ex_vi_ftm_params param;
} __packed;
struct afe_sp_ex_vi_get_param_resp {
uint32_t status;
- struct afe_port_param_data_v2 pdata;
+ struct param_hdr_v3 pdata;
struct afe_sp_ex_vi_ftm_params param;
} __packed;
@@ -9169,23 +9203,16 @@ union afe_spkr_prot_config {
struct afe_sp_ex_vi_ftm_cfg ex_vi_ftm_cfg;
} __packed;
-struct afe_spkr_prot_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- union afe_spkr_prot_config prot_config;
-} __packed;
-
struct afe_spkr_prot_get_vi_calib {
struct apr_hdr hdr;
- struct afe_port_cmd_get_param_v2 get_param;
- struct afe_port_param_data_v2 pdata;
+ struct mem_mapping_hdr mem_hdr;
+ struct param_hdr_v3 pdata;
struct asm_calib_res_cfg res_cfg;
} __packed;
struct afe_spkr_prot_calib_get_resp {
uint32_t status;
- struct afe_port_param_data_v2 pdata;
+ struct param_hdr_v3 pdata;
struct asm_calib_res_cfg res_cfg;
} __packed;
@@ -9313,16 +9340,6 @@ struct srs_trumedia_params {
#define ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX 0x00010DED
#define ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS 0x10015000
#define ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER 0x10015001
-struct asm_dts_eagle_param {
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 param;
- struct asm_stream_param_data_v2 data;
-} __packed;
-
-struct asm_dts_eagle_param_get {
- struct apr_hdr hdr;
- struct asm_stream_cmd_get_pp_params_v2 param;
-} __packed;
/* Opcode to set BT address and license for aptx decoder */
#define APTX_DECODER_BT_ADDRESS 0x00013201
@@ -9430,6 +9447,7 @@ struct avcs_fwk_ver_info {
#define LSM_SESSION_CMD_CLOSE_TX (0x00012A88)
#define LSM_SESSION_CMD_SET_PARAMS (0x00012A83)
#define LSM_SESSION_CMD_SET_PARAMS_V2 (0x00012A8F)
+#define LSM_SESSION_CMD_SET_PARAMS_V3 (0x00012A92)
#define LSM_SESSION_CMD_REGISTER_SOUND_MODEL (0x00012A84)
#define LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL (0x00012A85)
#define LSM_SESSION_CMD_START (0x00012A86)
@@ -9476,6 +9494,7 @@ struct avcs_fwk_ver_info {
/* Commands/Params to pass the codec/slimbus data to DSP */
#define AFE_SVC_CMD_SET_PARAM (0x000100f3)
+#define AFE_SVC_CMD_SET_PARAM_V2 (0x000100fc)
#define AFE_MODULE_CDC_DEV_CFG (0x00010234)
#define AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG (0x00010235)
#define AFE_PARAM_ID_CDC_REG_CFG (0x00010236)
@@ -9860,13 +9879,6 @@ struct afe_clk_cfg {
#define AFE_MODULE_CLOCK_SET 0x0001028F
#define AFE_PARAM_ID_CLOCK_SET 0x00010290
-struct afe_lpass_clk_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_clk_cfg clk_cfg;
-} __packed;
-
enum afe_lpass_digital_clk_src {
Q6AFE_LPASS_DIGITAL_ROOT_INVALID,
Q6AFE_LPASS_DIGITAL_ROOT_PRI_MI2S_OSR,
@@ -9902,14 +9914,6 @@ struct afe_digital_clk_cfg {
u16 reserved;
} __packed;
-
-struct afe_lpass_digital_clk_config_command {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_digital_clk_cfg clk_cfg;
-} __packed;
-
/*
* Opcode for AFE to start DTMF.
*/
@@ -10018,107 +10022,43 @@ struct afe_param_cdc_reg_cfg_data {
struct afe_param_cdc_reg_cfg *reg_data;
} __packed;
-struct afe_svc_cmd_set_param {
- uint32_t payload_size;
- uint32_t payload_address_lsw;
- uint32_t payload_address_msw;
- uint32_t mem_map_handle;
-} __packed;
-
-struct afe_svc_param_data {
- uint32_t module_id;
- uint32_t param_id;
- uint16_t param_size;
- uint16_t reserved;
-} __packed;
-
-struct afe_param_hw_mad_ctrl {
- uint32_t minor_version;
- uint16_t mad_type;
- uint16_t mad_enable;
-} __packed;
-
-struct afe_cmd_hw_mad_ctrl {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_hw_mad_ctrl payload;
-} __packed;
+struct afe_svc_cmd_set_param_v1 {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
-struct afe_cmd_hw_mad_slimbus_slave_port_cfg {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_slimbus_slave_port_cfg sb_port_cfg;
-} __packed;
+ /* The total size of the payload, including param_hdr_v3 */
+ uint32_t payload_size;
-struct afe_cmd_sw_mad_enable {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
-} __packed;
+ /* The memory mapping header to be used when sending outband */
+ struct mem_mapping_hdr mem_hdr;
-struct afe_param_cdc_reg_cfg_payload {
- struct afe_svc_param_data common;
- struct afe_param_cdc_reg_cfg reg_cfg;
+ /* The parameter data to be filled when sent inband */
+ u32 param_data[0];
} __packed;
-struct afe_lpass_clk_config_command_v2 {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_svc_param_data pdata;
- struct afe_clk_set clk_cfg;
-} __packed;
+struct afe_svc_cmd_set_param_v2 {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
-/*
- * reg_data's size can be up to AFE_MAX_CDC_REGISTERS_TO_CONFIG
- */
-struct afe_svc_cmd_cdc_reg_cfg {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_param_cdc_reg_cfg_payload reg_data[0];
-} __packed;
+ /* The memory mapping header to be used when sending outband */
+ struct mem_mapping_hdr mem_hdr;
-struct afe_svc_cmd_init_cdc_reg_cfg {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_port_param_data_v2 init;
-} __packed;
+ /* The total size of the payload, including param_hdr_v3 */
+ u32 payload_size;
-struct afe_svc_cmd_sb_slave_cfg {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_cdc_slimbus_slave_cfg sb_slave_cfg;
+ /* The parameter data to be filled when sent inband */
+ u32 param_data[0];
} __packed;
-struct afe_svc_cmd_cdc_reg_page_cfg {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_cdc_reg_page_cfg cdc_reg_page_cfg;
-} __packed;
-
-struct afe_svc_cmd_cdc_aanc_version {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_cdc_aanc_version version;
-} __packed;
-
-struct afe_port_cmd_set_aanc_param {
- struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
- struct afe_port_param_data_v2 pdata;
- union {
- struct afe_param_aanc_port_cfg aanc_port_cfg;
- struct afe_mod_enable_param mod_enable;
- } __packed data;
+struct afe_param_hw_mad_ctrl {
+ uint32_t minor_version;
+ uint16_t mad_type;
+ uint16_t mad_enable;
} __packed;
struct afe_port_cmd_set_aanc_acdb_table {
struct apr_hdr hdr;
- struct afe_port_cmd_set_param_v2 param;
+ struct mem_mapping_hdr mem_hdr;
} __packed;
/* Dolby DAP topology */
@@ -10141,13 +10081,6 @@ struct afe_port_cmd_set_aanc_acdb_table {
#define Q14_GAIN_ZERO_POINT_FIVE 0x2000
#define Q14_GAIN_UNITY 0x4000
-struct afe_svc_cmd_set_clip_bank_selection {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_clip_bank_sel bank_sel;
-} __packed;
-
/* Ultrasound supported formats */
#define US_POINT_EPOS_FORMAT_V2 0x0001272D
#define US_RAW_FORMAT_V2 0x0001272C
@@ -10361,13 +10294,6 @@ union afe_port_group_config {
struct afe_param_id_group_device_tdm_cfg tdm_cfg;
} __packed;
-struct afe_port_group_create {
- struct apr_hdr hdr;
- struct afe_svc_cmd_set_param param;
- struct afe_port_param_data_v2 pdata;
- union afe_port_group_config data;
-} __packed;
-
/* ID of the parameter used by #AFE_MODULE_AUDIO_DEV_INTERFACE to specify
* the timing statistics of the corresponding device interface.
* Client can periodically query for the device time statistics to help adjust
@@ -10457,16 +10383,9 @@ struct afe_param_id_dev_timing_stats {
u32 ref_timer_abs_ts_msw;
} __packed;
-struct afe_av_dev_drift_get_param {
- struct apr_hdr hdr;
- struct afe_port_cmd_get_param_v2 get_param;
- struct afe_port_param_data_v2 pdata;
- struct afe_param_id_dev_timing_stats timing_stats;
-} __packed;
-
struct afe_av_dev_drift_get_param_resp {
uint32_t status;
- struct afe_port_param_data_v2 pdata;
+ struct param_hdr_v3 pdata;
struct afe_param_id_dev_timing_stats timing_stats;
} __packed;
@@ -10678,7 +10597,7 @@ union asm_session_mtmx_strtr_param_config {
struct asm_mtmx_strtr_params {
struct apr_hdr hdr;
struct asm_session_cmd_set_mtmx_strstr_params_v2 param;
- struct asm_stream_param_data_v2 data;
+ struct param_hdr_v1 data;
union asm_session_mtmx_strtr_param_config config;
} __packed;
@@ -10788,7 +10707,7 @@ struct asm_mtmx_strtr_get_params {
struct asm_mtmx_strtr_get_params_cmdrsp {
uint32_t err_code;
- struct asm_stream_param_data_v2 param_info;
+ struct param_hdr_v1 param_info;
union asm_session_mtmx_strtr_data_type param_data;
} __packed;
@@ -10808,18 +10727,14 @@ enum {
#define AUDPROC_PARAM_ID_COMPRESSED_MUTE 0x00010771
struct adm_set_compressed_device_mute {
- struct adm_cmd_set_pp_params_v5 command;
- struct adm_param_data_v5 params;
- u32 mute_on;
+ u32 mute_on;
} __packed;
#define AUDPROC_MODULE_ID_COMPRESSED_LATENCY 0x0001076E
#define AUDPROC_PARAM_ID_COMPRESSED_LATENCY 0x0001076F
struct adm_set_compressed_device_latency {
- struct adm_cmd_set_pp_params_v5 command;
- struct adm_param_data_v5 params;
- u32 latency;
+ u32 latency;
} __packed;
#define VOICEPROC_MODULE_ID_GENERIC_TX 0x00010EF6
@@ -10849,12 +10764,6 @@ struct adm_param_fluence_soundfocus_t {
uint16_t reserved;
} __packed;
-struct adm_set_fluence_soundfocus_param {
- struct adm_cmd_set_pp_params_v5 params;
- struct adm_param_data_v5 data;
- struct adm_param_fluence_soundfocus_t soundfocus_data;
-} __packed;
-
struct adm_param_fluence_sourcetracking_t {
uint8_t vad[MAX_SECTORS];
uint16_t doa_speech;
@@ -10884,10 +10793,4 @@ struct admx_sec_primary_mic_ch {
uint16_t reserved1;
} __packed;
-
-struct adm_set_sec_primary_ch_params {
- struct adm_cmd_set_pp_params_v5 params;
- struct adm_param_data_v5 data;
- struct admx_sec_primary_mic_ch sec_primary_mic_ch_data;
-} __packed;
#endif /*_APR_AUDIO_V2_H_ */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 2b6e8f8240d9..147e448ed405 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -106,7 +106,7 @@ struct snd_pcm_ops {
#endif
#define SNDRV_PCM_IOCTL1_RESET 0
-#define SNDRV_PCM_IOCTL1_INFO 1
+/* 1 is absent slot. */
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
#define SNDRV_PCM_IOCTL1_GSTATE 3
#define SNDRV_PCM_IOCTL1_FIFO_SIZE 4
@@ -466,6 +466,7 @@ struct snd_pcm_substream {
const struct snd_pcm_ops *ops;
/* -- runtime information -- */
struct snd_pcm_runtime *runtime;
+ spinlock_t runtime_lock;
/* -- timer section -- */
struct snd_timer *timer; /* timer */
unsigned timer_running: 1; /* time is running */
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index 65c42ee18914..84087de3d4d8 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -25,6 +25,8 @@
#define MAX_MODULES_IN_TOPO 16
#define ADM_GET_TOPO_MODULE_LIST_LENGTH\
((MAX_MODULES_IN_TOPO + 1) * sizeof(uint32_t))
+#define ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH \
+ ((MAX_MODULES_IN_TOPO + 1) * 2 * sizeof(uint32_t))
#define AUD_PROC_BLOCK_SIZE 4096
#define AUD_VOL_BLOCK_SIZE 4096
#define AUDIO_RX_CALIBRATION_SIZE (AUD_PROC_BLOCK_SIZE + \
@@ -101,12 +103,24 @@ void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate);
int adm_get_params(int port_id, int copp_idx, uint32_t module_id,
uint32_t param_id, uint32_t params_length, char *params);
+int adm_get_pp_params(int port_id, int copp_idx, uint32_t client_id,
+ struct mem_mapping_hdr *mem_hdr,
+ struct param_hdr_v3 *param_hdr, u8 *returned_param_data);
+
int adm_send_params_v5(int port_id, int copp_idx, char *params,
uint32_t params_length);
int adm_dolby_dap_send_params(int port_id, int copp_idx, char *params,
uint32_t params_length);
+int adm_set_pp_params(int port_id, int copp_idx,
+ struct mem_mapping_hdr *mem_hdr, u8 *param_data,
+ u32 params_size);
+
+int adm_pack_and_set_one_pp_param(int port_id, int copp_idx,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data);
+
int adm_open(int port, int path, int rate, int mode, int topology,
int perf_mode, uint16_t bits_per_sample,
int app_type, int acdbdev_id);
@@ -157,6 +171,10 @@ int adm_set_downmix_params(int port_id, int copp_idx,
int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length,
char *params);
+int adm_get_pp_topo_module_list_v2(int port_id, int copp_idx,
+ int32_t param_length,
+ int32_t *returned_params);
+
int adm_set_volume(int port_id, int copp_idx, int volume);
int adm_set_softvolume(int port_id, int copp_idx,
@@ -169,6 +187,9 @@ int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx,
int adm_param_enable(int port_id, int copp_idx, int module_id, int enable);
+int adm_param_enable_v2(int port_id, int copp_idx,
+ struct module_instance_info mod_inst_info, int enable);
+
int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
int cal_type, char *params, int size);
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 9ddd02cac9ac..285d32e249b8 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -265,6 +265,17 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir
int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
struct audio_client *ac);
+int q6asm_set_pp_params(struct audio_client *ac,
+ struct mem_mapping_hdr *mem_hdr, u8 *param_data,
+ u32 param_size);
+
+int q6asm_pack_and_set_pp_param_in_band(struct audio_client *ac,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data);
+
+int q6asm_set_soft_volume_module_instance_ids(int instance,
+ struct param_hdr_v3 *param_hdr);
+
int q6asm_open_read(struct audio_client *ac, uint32_t format
/*, uint16_t bits_per_sample*/);
diff --git a/include/sound/q6common.h b/include/sound/q6common.h
new file mode 100644
index 000000000000..b6208f756cd9
--- /dev/null
+++ b/include/sound/q6common.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __Q6COMMON_H__
+#define __Q6COMMON_H__
+
+#include <sound/apr_audio-v2.h>
+
+void q6common_update_instance_id_support(bool supported);
+bool q6common_is_instance_id_supported(void);
+int q6common_pack_pp_params(u8 *dest, struct param_hdr_v3 *v3_hdr,
+ u8 *param_data, u32 *total_size);
+
+#endif /* __Q6COMMON_H__ */
diff --git a/include/sound/q6lsm.h b/include/sound/q6lsm.h
index 4805246766d6..c046cd468b49 100644
--- a/include/sound/q6lsm.h
+++ b/include/sound/q6lsm.h
@@ -112,31 +112,27 @@ struct lsm_custom_topologies {
uint32_t buffer_size;
} __packed;
-struct lsm_param_size_reserved {
- uint16_t param_size;
- uint16_t reserved;
-} __packed;
-
-union lsm_param_size {
- uint32_t param_size;
- struct lsm_param_size_reserved sr;
+struct lsm_session_cmd_set_params_v2 {
+ struct apr_hdr apr_hdr;
+ uint32_t payload_size;
+ struct mem_mapping_hdr mem_hdr;
+ u32 param_data[0];
} __packed;
-struct lsm_param_payload_common {
- uint32_t module_id;
- uint32_t param_id;
- union lsm_param_size p_size;
+struct lsm_session_cmd_set_params_v3 {
+ struct apr_hdr apr_hdr;
+ struct mem_mapping_hdr mem_hdr;
+ uint32_t payload_size;
+ u32 param_data[0];
} __packed;
struct lsm_param_op_mode {
- struct lsm_param_payload_common common;
uint32_t minor_version;
uint16_t mode;
uint16_t reserved;
} __packed;
struct lsm_param_connect_to_port {
- struct lsm_param_payload_common common;
uint32_t minor_version;
/* AFE port id that receives voice wake up data */
uint16_t port_id;
@@ -144,20 +140,17 @@ struct lsm_param_connect_to_port {
} __packed;
struct lsm_param_poll_enable {
- struct lsm_param_payload_common common;
uint32_t minor_version;
/* indicates to voice wakeup that HW MAD/SW polling is enabled or not */
uint32_t polling_enable;
} __packed;
struct lsm_param_fwk_mode_cfg {
- struct lsm_param_payload_common common;
uint32_t minor_version;
uint32_t mode;
} __packed;
struct lsm_param_media_fmt {
- struct lsm_param_payload_common common;
uint32_t minor_version;
uint32_t sample_rate;
uint16_t num_channels;
@@ -165,78 +158,23 @@ struct lsm_param_media_fmt {
uint8_t channel_mapping[LSM_MAX_NUM_CHANNELS];
} __packed;
-/*
- * This param cannot be sent in this format.
- * The actual number of confidence level values
- * need to appended to this param payload.
- */
-struct lsm_param_min_confidence_levels {
- struct lsm_param_payload_common common;
- uint8_t num_confidence_levels;
-} __packed;
-
-struct lsm_set_params_hdr {
- uint32_t data_payload_size;
- uint32_t data_payload_addr_lsw;
- uint32_t data_payload_addr_msw;
- uint32_t mem_map_handle;
-} __packed;
-
-struct lsm_cmd_set_params {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr param_hdr;
-} __packed;
-
-struct lsm_cmd_set_params_conf {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_param_min_confidence_levels conf_payload;
-} __packed;
-
-struct lsm_cmd_set_params_opmode {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_param_op_mode op_mode;
-} __packed;
-
-struct lsm_cmd_set_connectport {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_param_connect_to_port connect_to_port;
-} __packed;
-
-struct lsm_cmd_poll_enable {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_param_poll_enable poll_enable;
+struct lsm_param_confidence_levels {
+ uint8_t num_confidence_levels;
+ uint8_t confidence_levels[0];
} __packed;
struct lsm_param_epd_thres {
- struct lsm_param_payload_common common;
uint32_t minor_version;
uint32_t epd_begin;
uint32_t epd_end;
} __packed;
-struct lsm_cmd_set_epd_threshold {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr param_hdr;
- struct lsm_param_epd_thres epd_thres;
-} __packed;
-
struct lsm_param_gain {
- struct lsm_param_payload_common common;
uint32_t minor_version;
uint16_t gain;
uint16_t reserved;
} __packed;
-struct lsm_cmd_set_gain {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr param_hdr;
- struct lsm_param_gain lsm_gain;
-} __packed;
-
struct lsm_cmd_reg_snd_model {
struct apr_hdr hdr;
uint32_t model_size;
@@ -245,31 +183,16 @@ struct lsm_cmd_reg_snd_model {
uint32_t mem_map_handle;
} __packed;
-struct lsm_lab_enable {
- struct lsm_param_payload_common common;
+struct lsm_param_lab_enable {
uint16_t enable;
uint16_t reserved;
} __packed;
-struct lsm_params_lab_enable {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_lab_enable lab_enable;
-} __packed;
-
-struct lsm_lab_config {
- struct lsm_param_payload_common common;
+struct lsm_param_lab_config {
uint32_t minor_version;
uint32_t wake_up_latency_ms;
} __packed;
-
-struct lsm_params_lab_config {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_lab_config lab_config;
-} __packed;
-
struct lsm_cmd_read {
struct apr_hdr hdr;
uint32_t buf_addr_lsw;
@@ -291,19 +214,6 @@ struct lsm_cmd_read_done {
uint32_t flags;
} __packed;
-struct lsm_cmd_set_fwk_mode_cfg {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_param_fwk_mode_cfg fwk_mode_cfg;
-} __packed;
-
-struct lsm_cmd_set_media_fmt {
- struct apr_hdr msg_hdr;
- struct lsm_set_params_hdr params_hdr;
- struct lsm_param_media_fmt media_fmt;
-} __packed;
-
-
struct lsm_client *q6lsm_client_alloc(lsm_app_cb cb, void *priv);
void q6lsm_client_free(struct lsm_client *client);
int q6lsm_open(struct lsm_client *client, uint16_t app_id);
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index feb58d455560..4b9ee3009aa0 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
#define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
/* max delivery path length */
-#define SNDRV_SEQ_MAX_HOPS 10
+/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
+#define SNDRV_SEQ_MAX_HOPS 8
/* max size of event size */
#define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1adf8739980c..8555321306fb 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -199,6 +199,7 @@ enum tcm_tmreq_table {
TMR_LUN_RESET = 5,
TMR_TARGET_WARM_RESET = 6,
TMR_TARGET_COLD_RESET = 7,
+ TMR_UNKNOWN = 0xff,
};
/* fabric independent task management response values */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 7063bbcca03b..589df6f73789 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -128,6 +128,18 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_DISCARD, "Discard" }, \
{ CP_UMOUNT | CP_TRIMMED, "Umount,Trimmed" })
+#define show_fsync_cpreason(type) \
+ __print_symbolic(type, \
+ { CP_NO_NEEDED, "no needed" }, \
+ { CP_NON_REGULAR, "non regular" }, \
+ { CP_HARDLINK, "hardlink" }, \
+ { CP_SB_NEED_CP, "sb needs cp" }, \
+ { CP_WRONG_PINO, "wrong pino" }, \
+ { CP_NO_SPC_ROLL, "no space roll forward" }, \
+ { CP_NODE_NEED_CP, "node needs cp" }, \
+ { CP_FASTBOOT_MODE, "fastboot mode" }, \
+ { CP_SPEC_LOG_NUM, "log type is 2" })
+
struct victim_sel_policy;
struct f2fs_map_blocks;
@@ -202,14 +214,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
TRACE_EVENT(f2fs_sync_file_exit,
- TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret),
+ TP_PROTO(struct inode *inode, int cp_reason, int datasync, int ret),
- TP_ARGS(inode, need_cp, datasync, ret),
+ TP_ARGS(inode, cp_reason, datasync, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(int, need_cp)
+ __field(int, cp_reason)
__field(int, datasync)
__field(int, ret)
),
@@ -217,15 +229,15 @@ TRACE_EVENT(f2fs_sync_file_exit,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->need_cp = need_cp;
+ __entry->cp_reason = cp_reason;
__entry->datasync = datasync;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, "
+ TP_printk("dev = (%d,%d), ino = %lu, cp_reason: %s, "
"datasync = %d, ret = %d",
show_dev_ino(__entry),
- __entry->need_cp ? "needed" : "not needed",
+ show_fsync_cpreason(__entry->cp_reason),
__entry->datasync,
__entry->ret)
);
@@ -716,6 +728,91 @@ TRACE_EVENT(f2fs_get_victim,
__entry->free)
);
+TRACE_EVENT(f2fs_lookup_start,
+
+ TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),
+
+ TP_ARGS(dir, dentry, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(const char *, name)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->ino = dir->i_ino;
+ __entry->name = dentry->d_name.name;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u",
+ show_dev_ino(__entry),
+ __entry->name,
+ __entry->flags)
+);
+
+TRACE_EVENT(f2fs_lookup_end,
+
+ TP_PROTO(struct inode *dir, struct dentry *dentry, nid_t ino,
+ int err),
+
+ TP_ARGS(dir, dentry, ino, err),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(const char *, name)
+ __field(nid_t, cino)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->ino = dir->i_ino;
+ __entry->name = dentry->d_name.name;
+ __entry->cino = ino;
+ __entry->err = err;
+ ),
+
+ TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d",
+ show_dev_ino(__entry),
+ __entry->name,
+ __entry->cino,
+ __entry->err)
+);
+
+TRACE_EVENT(f2fs_readdir,
+
+ TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err),
+
+ TP_ARGS(dir, start_pos, end_pos, err),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, start)
+ __field(loff_t, end)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->ino = dir->i_ino;
+ __entry->start = start_pos;
+ __entry->end = end_pos;
+ __entry->err = err;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, start_pos:%llu, end_pos:%llu, err:%d",
+ show_dev_ino(__entry),
+ __entry->start,
+ __entry->end,
+ __entry->err)
+);
+
TRACE_EVENT(f2fs_fallocate,
TP_PROTO(struct inode *inode, int mode,
@@ -1274,6 +1371,13 @@ DEFINE_EVENT(f2fs_discard, f2fs_issue_discard,
TP_ARGS(dev, blkstart, blklen)
);
+DEFINE_EVENT(f2fs_discard, f2fs_remove_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
TRACE_EVENT(f2fs_issue_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 739bcb89f602..cc0ebe6867a5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -436,9 +436,9 @@ TRACE_EVENT(sched_update_task_ravg,
TRACE_EVENT(sched_get_task_cpu_cycles,
- TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+ TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time, struct task_struct *p),
- TP_ARGS(cpu, event, cycles, exec_time),
+ TP_ARGS(cpu, event, cycles, exec_time, p),
TP_STRUCT__entry(
__field(int, cpu )
@@ -448,6 +448,8 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
__field(u32, freq )
__field(u32, legacy_freq )
__field(u32, max_freq)
+ __field(pid_t, pid )
+ __array(char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
@@ -458,12 +460,14 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
__entry->freq = cpu_cycles_to_freq(cycles, exec_time);
__entry->legacy_freq = cpu_cur_freq(cpu);
__entry->max_freq = cpu_max_freq(cpu);
+ __entry->pid = p->pid;
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
),
- TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u",
+ TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u task=%d (%s)",
__entry->cpu, __entry->event, __entry->cycles,
__entry->exec_time, __entry->freq, __entry->legacy_freq,
- __entry->max_freq)
+ __entry->max_freq, __entry->pid, __entry->comm)
);
TRACE_EVENT(sched_update_history,
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 5664ca07c9c7..a01a076ea060 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -455,20 +455,22 @@ TRACE_EVENT(svc_recv,
TP_ARGS(rqst, status),
TP_STRUCT__entry(
- __field(struct sockaddr *, addr)
__field(__be32, xid)
__field(int, status)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
),
TP_fast_assign(
- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = status > 0 ? rqst->rq_xid : 0;
__entry->status = status;
__entry->flags = rqst->rq_flags;
+ memcpy(__get_dynamic_array(addr),
+ &rqst->rq_addr, rqst->rq_addrlen);
),
- TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
+ TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
+ (struct sockaddr *)__get_dynamic_array(addr),
be32_to_cpu(__entry->xid), __entry->status,
show_rqstp_flags(__entry->flags))
);
@@ -480,22 +482,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
TP_ARGS(rqst, status),
TP_STRUCT__entry(
- __field(struct sockaddr *, addr)
__field(__be32, xid)
- __field(int, dropme)
__field(int, status)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
),
TP_fast_assign(
- __entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = rqst->rq_xid;
__entry->status = status;
__entry->flags = rqst->rq_flags;
+ memcpy(__get_dynamic_array(addr),
+ &rqst->rq_addr, rqst->rq_addrlen);
),
TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
- __entry->addr, be32_to_cpu(__entry->xid),
+ (struct sockaddr *)__get_dynamic_array(addr),
+ be32_to_cpu(__entry->xid),
__entry->status, show_rqstp_flags(__entry->flags))
);
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 0630e0f64b9c..f693b5b5b7ab 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -80,6 +80,20 @@ extern "C" {
#define DRM_MODE_FLAG_SUPPORTS_RGB (1<<20)
#define DRM_MODE_FLAG_SUPPORTS_YUV (1<<21)
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+
+/* Aspect ratio flag bitmask (4 bits 27:24) */
+#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<24)
+#define DRM_MODE_FLAG_PIC_AR_NONE \
+ (DRM_MODE_PICTURE_ASPECT_NONE<<24)
+#define DRM_MODE_FLAG_PIC_AR_4_3 \
+ (DRM_MODE_PICTURE_ASPECT_4_3<<24)
+#define DRM_MODE_FLAG_PIC_AR_16_9 \
+ (DRM_MODE_PICTURE_ASPECT_16_9<<24)
+
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
@@ -94,11 +108,6 @@ extern "C" {
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
-/* Picture aspect ratio options */
-#define DRM_MODE_PICTURE_ASPECT_NONE 0
-#define DRM_MODE_PICTURE_ASPECT_4_3 1
-#define DRM_MODE_PICTURE_ASPECT_16_9 2
-
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index a852f2a3701f..30ae8c3c1c85 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -112,13 +112,14 @@ struct drm_msm_ext_panel_hdr_properties {
__u32 hdr_min_luminance; /* Min Luminance */
};
-#define MSM_PARAM_GPU_ID 0x01
-#define MSM_PARAM_GMEM_SIZE 0x02
-#define MSM_PARAM_CHIP_ID 0x03
-#define MSM_PARAM_MAX_FREQ 0x04
-#define MSM_PARAM_TIMESTAMP 0x05
-#define MSM_PARAM_GMEM_BASE 0x06
-#define MSM_PARAM_NR_RINGS 0x07
+#define MSM_PARAM_GPU_ID 0x01
+#define MSM_PARAM_GMEM_SIZE 0x02
+#define MSM_PARAM_CHIP_ID 0x03
+#define MSM_PARAM_MAX_FREQ 0x04
+#define MSM_PARAM_TIMESTAMP 0x05
+#define MSM_PARAM_GMEM_BASE 0x06
+#define MSM_PARAM_NR_RINGS 0x07
+#define MSM_PARAM_GPU_HANG_TIMEOUT 0xa0 /* timeout in ms */
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 22b6ad31c706..8562b1cb776b 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -90,7 +90,7 @@ PTR_FIELD(PTR_GEN, 0, 8)
#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
-#define PTR(gen, offset, dev) \
+#define MAKE_PTR(gen, offset, dev) \
((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
/* Bkey utility code */
diff --git a/include/uapi/linux/habmm.h b/include/uapi/linux/habmm.h
index 902bd35ee474..59b603a0fcf7 100644
--- a/include/uapi/linux/habmm.h
+++ b/include/uapi/linux/habmm.h
@@ -73,8 +73,9 @@ struct hab_unimport {
#define MM_AUD_END 105
#define MM_CAM_START 200
-#define MM_CAM 201
-#define MM_CAM_END 202
+#define MM_CAM_1 201
+#define MM_CAM_2 202
+#define MM_CAM_END 203
#define MM_DISP_START 300
#define MM_DISP_1 301
@@ -102,7 +103,13 @@ struct hab_unimport {
#define MM_QCPE_VM3 703
#define MM_QCPE_VM4 704
#define MM_QCPE_END 705
-#define MM_ID_MAX 706
+
+#define MM_CLK_START 800
+#define MM_CLK_VM1 801
+#define MM_CLK_VM2 802
+#define MM_CLK_END 803
+
+#define MM_ID_MAX 804
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE 0x00000000
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU 0x00000001
@@ -110,6 +117,14 @@ struct hab_unimport {
#define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001
+/*
+ * Collect cross-VM stats: client provides stat-buffer large enough to allow 2
+ * sets of a 2-uint64_t pair to collect seconds and nano-seconds at the
+ * beginning of the stat-buffer. Stats are collected when the stat-buffer leaves
+ * VM1, then enters VM2
+ */
+#define HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT 0x00000002
+
#define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001
#define HABMM_EXP_MEM_TYPE_DMA 0x00000001
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 0bdfc9741d19..3d330990676c 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -163,6 +163,8 @@
#define IPA_FLT_MAC_SRC_ADDR_802_3 (1ul << 19)
#define IPA_FLT_MAC_DST_ADDR_802_3 (1ul << 20)
#define IPA_FLT_MAC_ETHER_TYPE (1ul << 21)
+#define IPA_FLT_TCP_SYN (1ul << 23)
+#define IPA_FLT_TCP_SYN_L2TP (1ul << 24)
/**
* maximal number of NAT PDNs in the PDN config table
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 7af20a136429..804c9b2bfce3 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -104,8 +104,8 @@
#define RDS_INFO_LAST 10010
struct rds_info_counter {
- uint8_t name[32];
- uint64_t value;
+ __u8 name[32];
+ __u64 value;
} __attribute__((packed));
#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01
@@ -115,35 +115,35 @@ struct rds_info_counter {
#define TRANSNAMSIZ 16
struct rds_info_connection {
- uint64_t next_tx_seq;
- uint64_t next_rx_seq;
+ __u64 next_tx_seq;
+ __u64 next_rx_seq;
__be32 laddr;
__be32 faddr;
- uint8_t transport[TRANSNAMSIZ]; /* null term ascii */
- uint8_t flags;
+ __u8 transport[TRANSNAMSIZ]; /* null term ascii */
+ __u8 flags;
} __attribute__((packed));
#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
struct rds_info_message {
- uint64_t seq;
- uint32_t len;
+ __u64 seq;
+ __u32 len;
__be32 laddr;
__be32 faddr;
__be16 lport;
__be16 fport;
- uint8_t flags;
+ __u8 flags;
} __attribute__((packed));
struct rds_info_socket {
- uint32_t sndbuf;
+ __u32 sndbuf;
__be32 bound_addr;
__be32 connected_addr;
__be16 bound_port;
__be16 connected_port;
- uint32_t rcvbuf;
- uint64_t inum;
+ __u32 rcvbuf;
+ __u64 inum;
} __attribute__((packed));
struct rds_info_tcp_socket {
@@ -151,25 +151,25 @@ struct rds_info_tcp_socket {
__be16 local_port;
__be32 peer_addr;
__be16 peer_port;
- uint64_t hdr_rem;
- uint64_t data_rem;
- uint32_t last_sent_nxt;
- uint32_t last_expected_una;
- uint32_t last_seen_una;
+ __u64 hdr_rem;
+ __u64 data_rem;
+ __u32 last_sent_nxt;
+ __u32 last_expected_una;
+ __u32 last_seen_una;
} __attribute__((packed));
#define RDS_IB_GID_LEN 16
struct rds_info_rdma_connection {
__be32 src_addr;
__be32 dst_addr;
- uint8_t src_gid[RDS_IB_GID_LEN];
- uint8_t dst_gid[RDS_IB_GID_LEN];
+ __u8 src_gid[RDS_IB_GID_LEN];
+ __u8 dst_gid[RDS_IB_GID_LEN];
- uint32_t max_send_wr;
- uint32_t max_recv_wr;
- uint32_t max_send_sge;
- uint32_t rdma_mr_max;
- uint32_t rdma_mr_size;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 rdma_mr_max;
+ __u32 rdma_mr_size;
};
/*
@@ -210,70 +210,70 @@ struct rds_info_rdma_connection {
* (so that the application does not have to worry about
* alignment).
*/
-typedef uint64_t rds_rdma_cookie_t;
+typedef __u64 rds_rdma_cookie_t;
struct rds_iovec {
- uint64_t addr;
- uint64_t bytes;
+ __u64 addr;
+ __u64 bytes;
};
struct rds_get_mr_args {
struct rds_iovec vec;
- uint64_t cookie_addr;
- uint64_t flags;
+ __u64 cookie_addr;
+ __u64 flags;
};
struct rds_get_mr_for_dest_args {
struct __kernel_sockaddr_storage dest_addr;
struct rds_iovec vec;
- uint64_t cookie_addr;
- uint64_t flags;
+ __u64 cookie_addr;
+ __u64 flags;
};
struct rds_free_mr_args {
rds_rdma_cookie_t cookie;
- uint64_t flags;
+ __u64 flags;
};
struct rds_rdma_args {
rds_rdma_cookie_t cookie;
struct rds_iovec remote_vec;
- uint64_t local_vec_addr;
- uint64_t nr_local;
- uint64_t flags;
- uint64_t user_token;
+ __u64 local_vec_addr;
+ __u64 nr_local;
+ __u64 flags;
+ __u64 user_token;
};
struct rds_atomic_args {
rds_rdma_cookie_t cookie;
- uint64_t local_addr;
- uint64_t remote_addr;
+ __u64 local_addr;
+ __u64 remote_addr;
union {
struct {
- uint64_t compare;
- uint64_t swap;
+ __u64 compare;
+ __u64 swap;
} cswp;
struct {
- uint64_t add;
+ __u64 add;
} fadd;
struct {
- uint64_t compare;
- uint64_t swap;
- uint64_t compare_mask;
- uint64_t swap_mask;
+ __u64 compare;
+ __u64 swap;
+ __u64 compare_mask;
+ __u64 swap_mask;
} m_cswp;
struct {
- uint64_t add;
- uint64_t nocarry_mask;
+ __u64 add;
+ __u64 nocarry_mask;
} m_fadd;
};
- uint64_t flags;
- uint64_t user_token;
+ __u64 flags;
+ __u64 user_token;
};
struct rds_rdma_notify {
- uint64_t user_token;
- int32_t status;
+ __u64 user_token;
+ __s32 status;
};
#define RDS_RDMA_SUCCESS 0
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e75805..856de39d0b89 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
#define SPIDEV_H
#include <linux/types.h>
+#include <linux/ioctl.h>
/* User space versions of kernel symbols for SPI clocking modes,
* matching <linux/spi/spi.h>
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
new file mode 100644
index 000000000000..370d8845ab21
--- /dev/null
+++ b/include/uapi/linux/tee.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TEE_H
+#define __TEE_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * This file describes the API provided by a TEE driver to user space.
+ *
+ * Each TEE driver defines a TEE specific protocol which is used for the
+ * data passed back and forth using TEE_IOC_CMD.
+ */
+
+/* Helpers to make the ioctl defines */
+#define TEE_IOC_MAGIC 0xa4
+#define TEE_IOC_BASE 0
+
+/* Flags relating to shared memory */
+#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */
+#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */
+
+#define TEE_MAX_ARG_SIZE 1024
+
+#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
+
+/*
+ * TEE Implementation ID
+ */
+#define TEE_IMPL_ID_OPTEE 1
+
+/*
+ * OP-TEE specific capabilities
+ */
+#define TEE_OPTEE_CAP_TZ (1 << 0)
+
+/**
+ * struct tee_ioctl_version_data - TEE version
+ * @impl_id: [out] TEE implementation id
+ * @impl_caps: [out] Implementation specific capabilities
+ * @gen_caps: [out] Generic capabilities, defined by TEE_GEN_CAPS_* above
+ *
+ * Identifies the TEE implementation, @impl_id is one of TEE_IMPL_ID_* above.
+ * @impl_caps is implementation specific, for example TEE_OPTEE_CAP_*
+ * is valid when @impl_id == TEE_IMPL_ID_OPTEE.
+ */
+struct tee_ioctl_version_data {
+ __u32 impl_id;
+ __u32 impl_caps;
+ __u32 gen_caps;
+};
+
+/**
+ * TEE_IOC_VERSION - query version of TEE
+ *
+ * Takes a tee_ioctl_version_data struct and returns with the TEE version
+ * data filled in.
+ */
+#define TEE_IOC_VERSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 0, \
+ struct tee_ioctl_version_data)
+
+/**
+ * struct tee_ioctl_shm_alloc_data - Shared memory allocate argument
+ * @size: [in/out] Size of shared memory to allocate
+ * @flags: [in/out] Flags to/from allocation.
+ * @id: [out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_ALLOC below.
+ */
+struct tee_ioctl_shm_alloc_data {
+ __u64 size;
+ __u32 flags;
+ __s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_ALLOC - allocate shared memory
+ *
+ * Allocates shared memory between the user space process and secure OS.
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The returned file descriptor is used to map the shared memory into user
+ * space. The shared memory is freed when the descriptor is closed and the
+ * memory is unmapped.
+ */
+#define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \
+ struct tee_ioctl_shm_alloc_data)
+
+/**
+ * struct tee_ioctl_buf_data - Variable sized buffer
+ * @buf_ptr: [in] A __user pointer to a buffer
+ * @buf_len: [in] Length of the buffer above
+ *
+ * Used as argument for TEE_IOC_OPEN_SESSION, TEE_IOC_INVOKE,
+ * TEE_IOC_SUPPL_RECV, and TEE_IOC_SUPPL_SEND below.
+ */
+struct tee_ioctl_buf_data {
+ __u64 buf_ptr;
+ __u64 buf_len;
+};
+
+/*
+ * Attributes for struct tee_ioctl_param, selects field in the union
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_NONE 0 /* parameter not used */
+
+/*
+ * These defines value parameters (struct tee_ioctl_param_value)
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT 1
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT 2
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT 3 /* input and output */
+
+/*
+ * These defines shared memory reference parameters (struct
+ * tee_ioctl_param_memref)
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT 6
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */
+
+/*
+ * Mask for the type part of the attribute, leaves room for more types
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
+
+/*
+ * Matches TEEC_LOGIN_* in GP TEE Client API
+ * Are only defined for GP compliant TEEs
+ */
+#define TEE_IOCTL_LOGIN_PUBLIC 0
+#define TEE_IOCTL_LOGIN_USER 1
+#define TEE_IOCTL_LOGIN_GROUP 2
+#define TEE_IOCTL_LOGIN_APPLICATION 4
+#define TEE_IOCTL_LOGIN_USER_APPLICATION 5
+#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6
+
+/**
+ * struct tee_ioctl_param - parameter
+ * @attr: attributes
+ * @a: if a memref, offset into the shared memory object, else a value parameter
+ * @b: if a memref, size of the buffer, else a value parameter
+ * @c: if a memref, shared memory identifier, else a value parameter
+ *
+ * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in
+ * the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and
+ * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE
+ * indicates that none of the members are used.
+ *
+ * Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an
+ * identifier representing the shared memory object. A memref can reference
+ * a part of a shared memory by specifying an offset (@a) and size (@b) of
+ * the object. To supply the entire shared memory object set the offset
+ * (@a) to 0 and size (@b) to the previously returned size of the object.
+ */
+struct tee_ioctl_param {
+ __u64 attr;
+ __u64 a;
+ __u64 b;
+ __u64 c;
+};
+
+#define TEE_IOCTL_UUID_LEN 16
+
+/**
+ * struct tee_ioctl_open_session_arg - Open session argument
+ * @uuid: [in] UUID of the Trusted Application
+ * @clnt_uuid: [in] UUID of client
+ * @clnt_login: [in] Login class of client, TEE_IOCTL_LOGIN_* above
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @session: [out] Session id
+ * @ret: [out] return value
+ * @ret_origin [out] origin of the return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_ioctl_open_session_arg {
+ __u8 uuid[TEE_IOCTL_UUID_LEN];
+ __u8 clnt_uuid[TEE_IOCTL_UUID_LEN];
+ __u32 clnt_login;
+ __u32 cancel_id;
+ __u32 session;
+ __u32 ret;
+ __u32 ret_origin;
+ __u32 num_params;
+ /* num_params tells the actual number of element in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_OPEN_SESSION - opens a session to a Trusted Application
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_ioctl_open_session_arg followed by any array of struct
+ * tee_ioctl_param
+ */
+#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted
+ * Application
+ * @func: [in] Trusted Application function, specific to the TA
+ * @session: [in] Session id
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @ret: [out] return value
+ * @ret_origin [out] origin of the return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_ioctl_invoke_arg {
+ __u32 func;
+ __u32 session;
+ __u32 cancel_id;
+ __u32 ret;
+ __u32 ret_origin;
+ __u32 num_params;
+ /* num_params tells the actual number of element in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_INVOKE - Invokes a function in a Trusted Application
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_invoke_func_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 3, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_ioctl_cancel_arg - Cancels an open session or invoke ioctl
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @session: [in] Session id, if the session is opened, else set to 0
+ */
+struct tee_ioctl_cancel_arg {
+ __u32 cancel_id;
+ __u32 session;
+};
+
+/**
+ * TEE_IOC_CANCEL - Cancels an open session or invoke
+ */
+#define TEE_IOC_CANCEL _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 4, \
+ struct tee_ioctl_cancel_arg)
+
+/**
+ * struct tee_ioctl_close_session_arg - Closes an open session
+ * @session: [in] Session id
+ */
+struct tee_ioctl_close_session_arg {
+ __u32 session;
+};
+
+/**
+ * TEE_IOC_CLOSE_SESSION - Closes a session
+ */
+#define TEE_IOC_CLOSE_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 5, \
+ struct tee_ioctl_close_session_arg)
+
+/**
+ * struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function
+ * @func: [in] supplicant function
+ * @num_params [in/out] number of parameters following this struct
+ *
+ * @num_params is the number of params that tee-supplicant has room to
+ * receive when input, @num_params is the number of actual params
+ * tee-supplicant receives when output.
+ */
+struct tee_iocl_supp_recv_arg {
+ __u32 func;
+ __u32 num_params;
+ /* num_params tells the actual number of element in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_SUPPL_RECV - Receive a request for a supplicant function
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_iocl_supp_recv_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_SUPPL_RECV _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 6, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_iocl_supp_send_arg - Send a response to a received request
+ * @ret: [out] return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_iocl_supp_send_arg {
+ __u32 ret;
+ __u32 num_params;
+ /* num_params tells the actual number of element in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_SUPPL_SEND - Send a response to a received request
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_iocl_supp_send_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
+ struct tee_ioctl_buf_data)
+
+/*
+ * Five syscalls are used when communicating with the TEE driver.
+ * open(): opens the device associated with the driver
+ * ioctl(): as described above operating on the file descriptor from open()
+ * close(): two cases
+ * - closes the device file descriptor
+ * - closes a file descriptor connected to allocated shared memory
+ * mmap(): maps shared memory into user space using information from struct
+ * tee_ioctl_shm_alloc_data
+ * munmap(): unmaps previously shared memory
+ */
+
+#endif /*__TEE_H*/
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index b6c14b1ebdaf..7778723e4405 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -812,6 +812,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
__u8 bReserved;
} __attribute__((packed));
+#define USB_DT_USB_WIRELESS_CAP_SIZE 11
+
/* USB 2.0 Extension descriptor */
#define USB_CAP_TYPE_EXT 2
@@ -896,6 +898,17 @@ struct usb_ssp_cap_descriptor {
} __attribute__((packed));
/*
+ * Precision time measurement capability descriptor: advertised by devices and
+ * hubs that support PTM
+ */
+#define USB_PTM_CAP_TYPE 0xb
+struct usb_ptm_cap_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+} __attribute__((packed));
+
+/*
* Configuration Summary descriptors: Defines a list of functions in the
* configuration. This descriptor may be used by Host software to decide
* which Configuration to use to obtain the desired functionality.
@@ -919,6 +932,12 @@ struct usb_config_summary_descriptor {
struct function_class_info cs_info[];
} __attribute__((packed));
+/*
+ * The size of the descriptor for the Sublink Speed Attribute Count
+ * (SSAC) specified in bmAttributes[4:0].
+ */
+#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
+
/*-------------------------------------------------------------------------*/
/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with
@@ -1014,6 +1033,7 @@ enum usb3_link_state {
USB3_LPM_U3
};
+#define USB_DT_USB_PTM_ID_SIZE 3
/*
* A U1 timeout of 0x0 means the parent hub will reject any transitions to U1.
* 0xff means the parent hub will accept transitions to U1, but will not
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 421c65d8a901..640002326ace 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -20,3 +20,4 @@ header-y += msmb_ispif.h
header-y += msmb_pproc.h
header-y += radio-iris.h
header-y += radio-iris-commands.h
+header-y += msm_ba.h
diff --git a/include/uapi/media/msm_ba.h b/include/uapi/media/msm_ba.h
new file mode 100644
index 000000000000..587d14652f3f
--- /dev/null
+++ b/include/uapi/media/msm_ba.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_MSM_BA_H__
+#define __UAPI_MSM_BA_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+
+/* CSI control params */
+struct csi_ctrl_params {
+ uint32_t settle_count;
+ uint32_t lane_count;
+};
+
+/* private ioctl structure */
+struct msm_ba_v4l2_ioctl_t {
+ size_t len;
+ void __user *ptr;
+};
+
+/* ADV7481 private ioctls for CSI control params */
+#define VIDIOC_G_CSI_PARAMS \
+ _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_ba_v4l2_ioctl_t)
+#endif
diff --git a/include/uapi/sound/audio_effects.h b/include/uapi/sound/audio_effects.h
index 6565acff4073..147e877db71e 100644
--- a/include/uapi/sound/audio_effects.h
+++ b/include/uapi/sound/audio_effects.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -159,8 +159,12 @@
#define PBE_ENABLE_PARAM_LEN 1
#define PBE_CONFIG_PARAM_LEN 28
+/* Command Payload length and size for Non-IID commands */
#define COMMAND_PAYLOAD_LEN 3
#define COMMAND_PAYLOAD_SZ (COMMAND_PAYLOAD_LEN * sizeof(uint32_t))
+/* Command Payload length and size for IID commands */
+#define COMMAND_IID_PAYLOAD_LEN 4
+#define COMMAND_IID_PAYLOAD_SZ (COMMAND_IID_PAYLOAD_LEN * sizeof(uint32_t))
#define MAX_INBAND_PARAM_SZ 4096
#define Q27_UNITY (1 << 27)
#define Q8_UNITY (1 << 8)
diff --git a/init/initramfs.c b/init/initramfs.c
index f8ce812ba43e..52059169f64d 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -621,8 +621,11 @@ static int __init populate_rootfs(void)
{
char *err;
- if (do_skip_initramfs)
+ if (do_skip_initramfs) {
+ if (initrd_start)
+ free_initrd();
return default_rootfs();
+ }
err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
if (err)
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index a353df46c8e4..64464b44a265 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -23,5 +23,4 @@ obj-$(CONFIG_SCHED_TUNE) += tune.o
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
-obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eacfd2ac56a1..2370e7631728 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -99,10 +99,6 @@
ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
-#ifdef CONFIG_SMP
-static bool have_sched_energy_data(void);
-#endif
-
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -205,11 +201,6 @@ static int sched_feat_set(char *cmp)
sysctl_sched_features &= ~(1UL << i);
sched_feat_disable(i);
} else {
-#ifdef CONFIG_SMP
- if (i == __SCHED_FEAT_ENERGY_AWARE)
- WARN(!have_sched_energy_data(),
- "Missing sched energy data\n");
-#endif
sysctl_sched_features |= (1UL << i);
sched_feat_enable(i);
}
@@ -2334,11 +2325,11 @@ void sched_exit(struct task_struct *p)
reset_task_stats(p);
p->ravg.mark_start = wallclock;
p->ravg.sum_history[0] = EXITING_TASK_MARKER;
- free_task_load_ptrs(p);
enqueue_task(rq, p, 0);
clear_ed_task(p, rq);
task_rq_unlock(rq, p, &flags);
+ free_task_load_ptrs(p);
}
#endif /* CONFIG_SCHED_HMP */
@@ -3176,91 +3167,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
return ns;
}
-#ifdef CONFIG_CPU_FREQ_GOV_SCHED
-
-static inline
-unsigned long add_capacity_margin(unsigned long cpu_capacity)
-{
- cpu_capacity = cpu_capacity * capacity_margin;
- cpu_capacity /= SCHED_CAPACITY_SCALE;
- return cpu_capacity;
-}
-
-static inline
-unsigned long sum_capacity_reqs(unsigned long cfs_cap,
- struct sched_capacity_reqs *scr)
-{
- unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
- return total += scr->dl;
-}
-
-unsigned long boosted_cpu_util(int cpu);
-static void sched_freq_tick_pelt(int cpu)
-{
- unsigned long cpu_utilization = boosted_cpu_util(cpu);
- unsigned long capacity_curr = capacity_curr_of(cpu);
- struct sched_capacity_reqs *scr;
-
- scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
- if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
- return;
-
- /*
- * To make free room for a task that is building up its "real"
- * utilization and to harm its performance the least, request
- * a jump to a higher OPP as soon as the margin of free capacity
- * is impacted (specified by capacity_margin).
- * Remember CPU utilization in sched_capacity_reqs should be normalised.
- */
- cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
- set_cfs_cpu_capacity(cpu, true, cpu_utilization);
-}
-
-#ifdef CONFIG_SCHED_WALT
-static void sched_freq_tick_walt(int cpu)
-{
- unsigned long cpu_utilization = cpu_util_freq(cpu);
- unsigned long capacity_curr = capacity_curr_of(cpu);
-
- if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
- return sched_freq_tick_pelt(cpu);
-
- /*
- * Add a margin to the WALT utilization to check if we will need to
- * increase frequency.
- * NOTE: WALT tracks a single CPU signal for all the scheduling
- * classes, thus this margin is going to be added to the DL class as
- * well, which is something we do not do in sched_freq_tick_pelt case.
- */
- if (add_capacity_margin(cpu_utilization) <= capacity_curr)
- return;
-
- /*
- * It is likely that the load is growing so we
- * keep the added margin in our request as an
- * extra boost.
- * Remember CPU utilization in sched_capacity_reqs should be normalised.
- */
- cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
- set_cfs_cpu_capacity(cpu, true, cpu_utilization);
-
-}
-#define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu)
-#else
-#define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu)
-#endif /* CONFIG_SCHED_WALT */
-
-static void sched_freq_tick(int cpu)
-{
- if (!sched_freq())
- return;
-
- _sched_freq_tick(cpu);
-}
-#else
-static inline void sched_freq_tick(int cpu) { }
-#endif /* CONFIG_CPU_FREQ_GOV_SCHED */
-
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
@@ -3287,7 +3193,6 @@ void scheduler_tick(void)
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
early_notif = early_detection_notify(rq, wallclock);
- sched_freq_tick(cpu);
raw_spin_unlock(&rq->lock);
if (early_notif)
@@ -5009,6 +4914,15 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
raw_spin_lock_irqsave(&p->pi_lock, flags);
cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+
+ /*
+ * The userspace tasks are forbidden to run on
+ * isolated CPUs. So exclude isolated CPUs from
+ * the getaffinity.
+ */
+ if (!(p->flags & PF_KTHREAD))
+ cpumask_andnot(mask, mask, cpu_isolated_mask);
+
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
@@ -6727,6 +6641,12 @@ static int init_rootdomain(struct root_domain *rd)
if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_dlo_mask;
+#ifdef HAVE_RT_PUSH_IPI
+ rd->rto_cpu = -1;
+ raw_spin_lock_init(&rd->rto_lock);
+ init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
+#endif
+
init_dl_bw(&rd->dl_bw);
if (cpudl_init(&rd->cpudl) != 0)
goto free_dlo_mask;
@@ -7202,19 +7122,6 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
}
-static bool have_sched_energy_data(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- if (!rcu_dereference(per_cpu(sd_scs, cpu)) ||
- !rcu_dereference(per_cpu(sd_ea, cpu)))
- return false;
- }
-
- return true;
-}
-
/*
* Check that the per-cpu provided sd energy data is consistent for all cpus
* within the mask.
@@ -8031,9 +7938,6 @@ static int build_sched_domains(const struct cpumask *cpu_map,
}
rcu_read_unlock();
- WARN(sched_feat(ENERGY_AWARE) && !have_sched_energy_data(),
- "Missing data for energy aware scheduling\n");
-
ret = 0;
error:
__free_domain_allocs(&d, alloc_state, cpu_map);
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
deleted file mode 100644
index ec0aed7a8f96..000000000000
--- a/kernel/sched/cpufreq_sched.c
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- * Copyright (C) 2015 Michael Turquette <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/percpu.h>
-#include <linux/irq_work.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/cpufreq_sched.h>
-
-#include "sched.h"
-
-#define THROTTLE_DOWN_NSEC 50000000 /* 50ms default */
-#define THROTTLE_UP_NSEC 500000 /* 500us default */
-
-struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE;
-static bool __read_mostly cpufreq_driver_slow;
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
-static struct cpufreq_governor cpufreq_gov_sched;
-#endif
-
-static DEFINE_PER_CPU(unsigned long, enabled);
-DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
-
-struct gov_tunables {
- struct gov_attr_set attr_set;
- unsigned int up_throttle_nsec;
- unsigned int down_throttle_nsec;
-};
-
-/**
- * gov_data - per-policy data internal to the governor
- * @up_throttle: next throttling period expiry if increasing OPP
- * @down_throttle: next throttling period expiry if decreasing OPP
- * @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP
- * @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP
- * @task: worker thread for dvfs transition that may block/sleep
- * @irq_work: callback used to wake up worker thread
- * @requested_freq: last frequency requested by the sched governor
- *
- * struct gov_data is the per-policy cpufreq_sched-specific data structure. A
- * per-policy instance of it is created when the cpufreq_sched governor receives
- * the CPUFREQ_GOV_START condition and a pointer to it exists in the gov_data
- * member of struct cpufreq_policy.
- *
- * Readers of this data must call down_read(policy->rwsem). Writers must
- * call down_write(policy->rwsem).
- */
-struct gov_data {
- ktime_t up_throttle;
- ktime_t down_throttle;
- struct gov_tunables *tunables;
- struct list_head tunables_hook;
- struct task_struct *task;
- struct irq_work irq_work;
- unsigned int requested_freq;
-};
-
-static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
- unsigned int freq)
-{
- struct gov_data *gd = policy->governor_data;
-
- /* avoid race with cpufreq_sched_stop */
- if (!down_write_trylock(&policy->rwsem))
- return;
-
- __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
-
- gd->up_throttle = ktime_add_ns(ktime_get(),
- gd->tunables->up_throttle_nsec);
- gd->down_throttle = ktime_add_ns(ktime_get(),
- gd->tunables->down_throttle_nsec);
- up_write(&policy->rwsem);
-}
-
-static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq)
-{
- ktime_t now = ktime_get();
-
- ktime_t throttle = gd->requested_freq < cur_freq ?
- gd->down_throttle : gd->up_throttle;
-
- if (ktime_after(now, throttle))
- return false;
-
- while (1) {
- int usec_left = ktime_to_ns(ktime_sub(throttle, now));
-
- usec_left /= NSEC_PER_USEC;
- trace_cpufreq_sched_throttled(usec_left);
- usleep_range(usec_left, usec_left + 100);
- now = ktime_get();
- if (ktime_after(now, throttle))
- return true;
- }
-}
-
-/*
- * we pass in struct cpufreq_policy. This is safe because changing out the
- * policy requires a call to __cpufreq_governor(policy, CPUFREQ_GOV_STOP),
- * which tears down all of the data structures and __cpufreq_governor(policy,
- * CPUFREQ_GOV_START) will do a full rebuild, including this kthread with the
- * new policy pointer
- */
-static int cpufreq_sched_thread(void *data)
-{
- struct sched_param param;
- struct cpufreq_policy *policy;
- struct gov_data *gd;
- unsigned int new_request = 0;
- unsigned int last_request = 0;
- int ret;
-
- policy = (struct cpufreq_policy *) data;
- gd = policy->governor_data;
-
- param.sched_priority = 50;
- ret = sched_setscheduler_nocheck(gd->task, SCHED_FIFO, &param);
- if (ret) {
- pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
- do_exit(-EINVAL);
- } else {
- pr_debug("%s: kthread (%d) set to SCHED_FIFO\n",
- __func__, gd->task->pid);
- }
-
- do {
- new_request = gd->requested_freq;
- if (new_request == last_request) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
- break;
- schedule();
- } else {
- /*
- * if the frequency thread sleeps while waiting to be
- * unthrottled, start over to check for a newer request
- */
- if (finish_last_request(gd, policy->cur))
- continue;
- last_request = new_request;
- cpufreq_sched_try_driver_target(policy, new_request);
- }
- } while (!kthread_should_stop());
-
- return 0;
-}
-
-static void cpufreq_sched_irq_work(struct irq_work *irq_work)
-{
- struct gov_data *gd;
-
- gd = container_of(irq_work, struct gov_data, irq_work);
- if (!gd)
- return;
-
- wake_up_process(gd->task);
-}
-
-static void update_fdomain_capacity_request(int cpu)
-{
- unsigned int freq_new, index_new, cpu_tmp;
- struct cpufreq_policy *policy;
- struct gov_data *gd;
- unsigned long capacity = 0;
-
- /*
- * Avoid grabbing the policy if possible. A test is still
- * required after locking the CPU's policy to avoid racing
- * with the governor changing.
- */
- if (!per_cpu(enabled, cpu))
- return;
-
- policy = cpufreq_cpu_get(cpu);
- if (IS_ERR_OR_NULL(policy))
- return;
-
- if (policy->governor != &cpufreq_gov_sched ||
- !policy->governor_data)
- goto out;
-
- gd = policy->governor_data;
-
- /* find max capacity requested by cpus in this policy */
- for_each_cpu(cpu_tmp, policy->cpus) {
- struct sched_capacity_reqs *scr;
-
- scr = &per_cpu(cpu_sched_capacity_reqs, cpu_tmp);
- capacity = max(capacity, scr->total);
- }
-
- /* Convert the new maximum capacity request into a cpu frequency */
- freq_new = capacity * policy->cpuinfo.max_freq >> SCHED_CAPACITY_SHIFT;
- if (cpufreq_frequency_table_target(policy, policy->freq_table,
- freq_new, CPUFREQ_RELATION_L,
- &index_new))
- goto out;
- freq_new = policy->freq_table[index_new].frequency;
-
- if (freq_new > policy->max)
- freq_new = policy->max;
-
- if (freq_new < policy->min)
- freq_new = policy->min;
-
- trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
- gd->requested_freq);
- if (freq_new == gd->requested_freq)
- goto out;
-
- gd->requested_freq = freq_new;
-
- /*
- * Throttling is not yet supported on platforms with fast cpufreq
- * drivers.
- */
- if (cpufreq_driver_slow)
- irq_work_queue_on(&gd->irq_work, cpu);
- else
- cpufreq_sched_try_driver_target(policy, freq_new);
-
-out:
- cpufreq_cpu_put(policy);
-}
-
-#ifdef CONFIG_SCHED_WALT
-static inline unsigned long
-requested_capacity(struct sched_capacity_reqs *scr)
-{
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
- return scr->cfs;
- return scr->cfs + scr->rt;
-}
-#else
-#define requested_capacity(scr) (scr->cfs + scr->rt)
-#endif
-
-void update_cpu_capacity_request(int cpu, bool request)
-{
- unsigned long new_capacity;
- struct sched_capacity_reqs *scr;
-
- /* The rq lock serializes access to the CPU's sched_capacity_reqs. */
- lockdep_assert_held(&cpu_rq(cpu)->lock);
-
- scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
-
- new_capacity = requested_capacity(scr);
- new_capacity = new_capacity * capacity_margin
- / SCHED_CAPACITY_SCALE;
- new_capacity += scr->dl;
-
- if (new_capacity == scr->total)
- return;
-
- trace_cpufreq_sched_update_capacity(cpu, request, scr, new_capacity);
-
- scr->total = new_capacity;
- if (request)
- update_fdomain_capacity_request(cpu);
-}
-
-static inline void set_sched_freq(void)
-{
- static_key_slow_inc(&__sched_freq);
-}
-
-static inline void clear_sched_freq(void)
-{
- static_key_slow_dec(&__sched_freq);
-}
-
-/* Tunables */
-static struct gov_tunables *global_tunables;
-
-static inline struct gov_tunables *to_tunables(struct gov_attr_set *attr_set)
-{
- return container_of(attr_set, struct gov_tunables, attr_set);
-}
-
-static ssize_t up_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf)
-{
- struct gov_tunables *tunables = to_tunables(attr_set);
-
- return sprintf(buf, "%u\n", tunables->up_throttle_nsec);
-}
-
-static ssize_t up_throttle_nsec_store(struct gov_attr_set *attr_set,
- const char *buf, size_t count)
-{
- struct gov_tunables *tunables = to_tunables(attr_set);
- int ret;
- long unsigned int val;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->up_throttle_nsec = val;
- return count;
-}
-
-static ssize_t down_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf)
-{
- struct gov_tunables *tunables = to_tunables(attr_set);
-
- return sprintf(buf, "%u\n", tunables->down_throttle_nsec);
-}
-
-static ssize_t down_throttle_nsec_store(struct gov_attr_set *attr_set,
- const char *buf, size_t count)
-{
- struct gov_tunables *tunables = to_tunables(attr_set);
- int ret;
- long unsigned int val;
-
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->down_throttle_nsec = val;
- return count;
-}
-
-static struct governor_attr up_throttle_nsec = __ATTR_RW(up_throttle_nsec);
-static struct governor_attr down_throttle_nsec = __ATTR_RW(down_throttle_nsec);
-
-static struct attribute *schedfreq_attributes[] = {
- &up_throttle_nsec.attr,
- &down_throttle_nsec.attr,
- NULL
-};
-
-static struct kobj_type tunables_ktype = {
- .default_attrs = schedfreq_attributes,
- .sysfs_ops = &governor_sysfs_ops,
-};
-
-static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
-{
- struct gov_data *gd;
- int cpu;
- int rc;
-
- for_each_cpu(cpu, policy->cpus)
- memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0,
- sizeof(struct sched_capacity_reqs));
-
- gd = kzalloc(sizeof(*gd), GFP_KERNEL);
- if (!gd)
- return -ENOMEM;
-
- policy->governor_data = gd;
-
- if (!global_tunables) {
- gd->tunables = kzalloc(sizeof(*gd->tunables), GFP_KERNEL);
- if (!gd->tunables)
- goto free_gd;
-
- gd->tunables->up_throttle_nsec =
- policy->cpuinfo.transition_latency ?
- policy->cpuinfo.transition_latency :
- THROTTLE_UP_NSEC;
- gd->tunables->down_throttle_nsec =
- THROTTLE_DOWN_NSEC;
-
- rc = kobject_init_and_add(&gd->tunables->attr_set.kobj,
- &tunables_ktype,
- get_governor_parent_kobj(policy),
- "%s", cpufreq_gov_sched.name);
- if (rc)
- goto free_tunables;
-
- gov_attr_set_init(&gd->tunables->attr_set,
- &gd->tunables_hook);
-
- pr_debug("%s: throttle_threshold = %u [ns]\n",
- __func__, gd->tunables->up_throttle_nsec);
-
- if (!have_governor_per_policy())
- global_tunables = gd->tunables;
- } else {
- gd->tunables = global_tunables;
- gov_attr_set_get(&global_tunables->attr_set,
- &gd->tunables_hook);
- }
-
- policy->governor_data = gd;
- if (cpufreq_driver_is_slow()) {
- cpufreq_driver_slow = true;
- gd->task = kthread_create(cpufreq_sched_thread, policy,
- "kschedfreq:%d",
- cpumask_first(policy->related_cpus));
- if (IS_ERR_OR_NULL(gd->task)) {
- pr_err("%s: failed to create kschedfreq thread\n",
- __func__);
- goto free_tunables;
- }
- get_task_struct(gd->task);
- kthread_bind_mask(gd->task, policy->related_cpus);
- wake_up_process(gd->task);
- init_irq_work(&gd->irq_work, cpufreq_sched_irq_work);
- }
-
- set_sched_freq();
-
- return 0;
-
-free_tunables:
- kfree(gd->tunables);
-free_gd:
- policy->governor_data = NULL;
- kfree(gd);
- return -ENOMEM;
-}
-
-static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
-{
- unsigned int count;
- struct gov_data *gd = policy->governor_data;
-
- clear_sched_freq();
- if (cpufreq_driver_slow) {
- kthread_stop(gd->task);
- put_task_struct(gd->task);
- }
-
- count = gov_attr_set_put(&gd->tunables->attr_set, &gd->tunables_hook);
- if (!count) {
- if (!have_governor_per_policy())
- global_tunables = NULL;
- kfree(gd->tunables);
- }
-
- policy->governor_data = NULL;
-
- kfree(gd);
- return 0;
-}
-
-static int cpufreq_sched_start(struct cpufreq_policy *policy)
-{
- int cpu;
-
- for_each_cpu(cpu, policy->cpus)
- per_cpu(enabled, cpu) = 1;
-
- return 0;
-}
-
-static void cpufreq_sched_limits(struct cpufreq_policy *policy)
-{
- unsigned int clamp_freq;
- struct gov_data *gd = policy->governor_data;;
-
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
- policy->cpu, policy->min, policy->max,
- policy->cur);
-
- clamp_freq = clamp(gd->requested_freq, policy->min, policy->max);
-
- if (policy->cur != clamp_freq)
- __cpufreq_driver_target(policy, clamp_freq, CPUFREQ_RELATION_L);
-}
-
-static int cpufreq_sched_stop(struct cpufreq_policy *policy)
-{
- int cpu;
-
- for_each_cpu(cpu, policy->cpus)
- per_cpu(enabled, cpu) = 0;
-
- return 0;
-}
-
-static int cpufreq_sched_setup(struct cpufreq_policy *policy,
- unsigned int event)
-{
- switch (event) {
- case CPUFREQ_GOV_POLICY_INIT:
- return cpufreq_sched_policy_init(policy);
- case CPUFREQ_GOV_POLICY_EXIT:
- return cpufreq_sched_policy_exit(policy);
- case CPUFREQ_GOV_START:
- return cpufreq_sched_start(policy);
- case CPUFREQ_GOV_STOP:
- return cpufreq_sched_stop(policy);
- case CPUFREQ_GOV_LIMITS:
- cpufreq_sched_limits(policy);
- break;
- }
- return 0;
-}
-
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
-static
-#endif
-struct cpufreq_governor cpufreq_gov_sched = {
- .name = "sched",
- .governor = cpufreq_sched_setup,
- .owner = THIS_MODULE,
-};
-
-static int __init cpufreq_sched_init(void)
-{
- int cpu;
-
- for_each_cpu(cpu, cpu_possible_mask)
- per_cpu(enabled, cpu) = 0;
- return cpufreq_register_governor(&cpufreq_gov_sched);
-}
-
-/* Try to make this the default governor */
-fs_initcall(cpufreq_sched_init);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9263ffd5673f..6fc5de10673e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -53,7 +53,6 @@ unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
unsigned int sysctl_sched_sync_hint_enable = 1;
-unsigned int sysctl_sched_initial_task_util = 0;
unsigned int sysctl_sched_cstate_aware = 1;
/*
@@ -746,9 +745,7 @@ void init_entity_runnable_average(struct sched_entity *se)
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
/*
* In previous Android versions, we used to have:
- * sa->util_avg = sched_freq() ?
- * sysctl_sched_initial_task_util :
- * scale_load_down(SCHED_LOAD_SCALE);
+ * sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
* sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
* However, that functionality has been moved to enqueue.
* It is unclear if we should restore this in enqueue.
@@ -5821,23 +5818,6 @@ unsigned long boosted_cpu_util(int cpu);
#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
-#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
-static void update_capacity_of(int cpu)
-{
- unsigned long req_cap;
-
- if (!sched_freq())
- return;
-
- /* Normalize scale-invariant capacity to cpu. */
- req_cap = boosted_cpu_util(cpu);
- req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
- set_cfs_cpu_capacity(cpu, true, req_cap);
-}
-#else
-#define update_capacity_of(X) do {} while(0)
-#endif /* SMP and CPU_FREQ_GOV_SCHED */
-
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
@@ -5850,7 +5830,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se;
#ifdef CONFIG_SMP
int task_new = flags & ENQUEUE_WAKEUP_NEW;
- int task_wakeup = flags & ENQUEUE_WAKEUP;
#endif
/*
@@ -5925,19 +5904,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
rq->rd->overutilized = true;
trace_sched_overutilized(true);
}
-
- }
-
- if (!se) {
- /*
- * We want to potentially trigger a freq switch
- * request only for tasks that are waking up; this is
- * because we get here also during load balancing, but
- * in these cases it seems wise to trigger as single
- * request after load balancing is done.
- */
- if (task_new || task_wakeup)
- update_capacity_of(cpu_of(rq));
}
#endif /* CONFIG_SMP */
@@ -6015,23 +5981,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
*/
schedtune_dequeue_task(p, cpu_of(rq));
- if (!se) {
- /*
- * We want to potentially trigger a freq switch
- * request only for tasks that are going to sleep;
- * this is because we get here also during load
- * balancing, but in these cases it seems wise to
- * trigger as single request after load balancing is
- * done.
- */
- if (task_sleep) {
- if (rq->cfs.nr_running)
- update_capacity_of(cpu_of(rq));
- else if (sched_freq())
- set_cfs_cpu_capacity(cpu_of(rq), false, 0); /* no normalization required for 0 */
- }
- }
-
#endif /* CONFIG_SMP */
hrtick_update(rq);
@@ -6583,13 +6532,6 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;
- /*
- * Try to estimate if a deeper idle state is
- * achievable when we move the task.
- */
- for_each_cpu(i, sched_group_cpus(sg))
- grp_util += cpu_util(i);
-
src_in_grp = cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg));
dst_in_grp = cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg));
if (src_in_grp == dst_in_grp) {
@@ -6598,10 +6540,16 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
*/
goto end;
}
- /* add or remove util as appropriate to indicate what group util
- * will be (worst case - no concurrent execution) after moving the task
+
+ /*
+ * Try to estimate if a deeper idle state is
+ * achievable when we move the task.
*/
- grp_util += src_in_grp ? -eenv->util_delta : eenv->util_delta;
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ grp_util += cpu_util_wake(i, eenv->task);
+ if (unlikely(i == eenv->trg_cpu))
+ grp_util += eenv->util_delta;
+ }
if (grp_util <=
((long)sg->sgc->max_capacity * (int)sg->group_weight)) {
@@ -6688,13 +6636,13 @@ static int sched_group_energy(struct energy_env *eenv)
if (sg->group_weight == 1) {
/* Remove capacity of src CPU (before task move) */
- if (eenv->util_delta == 0 &&
+ if (eenv->trg_cpu == eenv->src_cpu &&
cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg))) {
eenv->cap.before = sg->sge->cap_states[cap_idx].cap;
eenv->cap.delta -= eenv->cap.before;
}
/* Add capacity of dst CPU (after task move) */
- if (eenv->util_delta != 0 &&
+ if (eenv->trg_cpu == eenv->dst_cpu &&
cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg))) {
eenv->cap.after = sg->sge->cap_states[cap_idx].cap;
eenv->cap.delta += eenv->cap.after;
@@ -7464,7 +7412,9 @@ done:
/*
* cpu_util_wake: Compute cpu utilization with any contributions from
- * the waking task p removed.
+ * the waking task p removed. check_for_migration() looks for a better CPU of
+ * rq->curr. For that case we should return cpu util with contributions from
+ * currently running task p removed.
*/
static int cpu_util_wake(int cpu, struct task_struct *p)
{
@@ -7477,7 +7427,8 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
* utilization from cpu utilization. Instead just use
* cpu_util for this case.
*/
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+ p->state == TASK_WAKING)
return cpu_util(cpu);
#endif
/* Task has no contribution or is new */
@@ -7872,6 +7823,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
/* No energy saving for target_cpu, try backup */
target_cpu = tmp_backup;
eenv.dst_cpu = target_cpu;
+ eenv.trg_cpu = target_cpu;
if (tmp_backup < 0 ||
tmp_backup == prev_cpu ||
energy_diff(&eenv) >= 0) {
@@ -8924,10 +8876,6 @@ static void attach_one_task(struct rq *rq, struct task_struct *p)
{
raw_spin_lock(&rq->lock);
attach_task(rq, p);
- /*
- * We want to potentially raise target_cpu's OPP.
- */
- update_capacity_of(cpu_of(rq));
raw_spin_unlock(&rq->lock);
}
@@ -8949,11 +8897,6 @@ static void attach_tasks(struct lb_env *env)
attach_task(env->dst_rq, p);
}
- /*
- * We want to potentially raise env.dst_cpu's OPP.
- */
- update_capacity_of(env->dst_cpu);
-
raw_spin_unlock(&env->dst_rq->lock);
}
@@ -10513,11 +10456,6 @@ more_balance:
* ld_moved - cumulative load moved across iterations
*/
cur_ld_moved = detach_tasks(&env);
- /*
- * We want to potentially lower env.src_cpu's OPP.
- */
- if (cur_ld_moved)
- update_capacity_of(env.src_cpu);
/*
* We've detached some tasks from busiest_rq. Every
@@ -10767,7 +10705,6 @@ static int idle_balance(struct rq *this_rq)
struct sched_domain *sd;
int pulled_task = 0;
u64 curr_cost = 0;
- long removed_util=0;
if (cpu_isolated(this_cpu))
return 0;
@@ -10794,17 +10731,6 @@ static int idle_balance(struct rq *this_rq)
raw_spin_unlock(&this_rq->lock);
- /*
- * If removed_util_avg is !0 we most probably migrated some task away
- * from this_cpu. In this case we might be willing to trigger an OPP
- * update, but we want to do so if we don't find anybody else to pull
- * here (we will trigger an OPP update with the pulled task's enqueue
- * anyway).
- *
- * Record removed_util before calling update_blocked_averages, and use
- * it below (before returning) to see if an OPP update is required.
- */
- removed_util = atomic_long_read(&(this_rq->cfs).removed_util_avg);
update_blocked_averages(this_cpu);
rcu_read_lock();
for_each_domain(this_cpu, sd) {
@@ -10872,12 +10798,6 @@ out:
if (pulled_task) {
idle_exit_fair(this_rq);
this_rq->idle_stamp = 0;
- } else if (removed_util) {
- /*
- * No task pulled and someone has been migrated away.
- * Good case to trigger an OPP update.
- */
- update_capacity_of(this_cpu);
}
return pulled_task;
@@ -10962,10 +10882,6 @@ static int active_load_balance_cpu_stop(void *data)
p = detach_one_task(&env);
if (p) {
schedstat_inc(sd, alb_pushed);
- /*
- * We want to potentially lower env.src_cpu's OPP.
- */
- update_capacity_of(env.src_cpu);
moved = true;
} else {
schedstat_inc(sd, alb_failed);
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index ae6876e62c0f..ea066ab8376b 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1526,6 +1526,10 @@ unsigned int cpu_temp(int cpu)
return 0;
}
+/*
+ * kfree() may wake up kswapd. So this function should NOT be called
+ * with any CPU's rq->lock acquired.
+ */
void free_task_load_ptrs(struct task_struct *p)
{
kfree(p->ravg.curr_window_cpu);
@@ -2608,7 +2612,8 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
p->cpu_cycles = cur_cycles;
- trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
+ trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles,
+ rq->cc.time, p);
}
static int
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 47e97ef57eb8..c290db7f289a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -10,6 +10,8 @@
#include <linux/irq_work.h>
#include <trace/events/sched.h>
+#include "tune.h"
+
int sched_rr_timeslice = RR_TIMESLICE;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
@@ -66,10 +68,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
-#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
-static void push_irq_work_func(struct irq_work *work);
-#endif
-
void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
@@ -89,13 +87,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
-
-#ifdef HAVE_RT_PUSH_IPI
- rt_rq->push_flags = 0;
- rt_rq->push_cpu = nr_cpu_ids;
- raw_spin_lock_init(&rt_rq->push_lock);
- init_irq_work(&rt_rq->push_work, push_irq_work_func);
-#endif
#endif /* CONFIG_SMP */
/* We start is dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
@@ -1394,6 +1385,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
+
+ schedtune_enqueue_task(p, cpu_of(rq));
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1405,6 +1398,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
dec_hmp_sched_stats_rt(rq, p);
dequeue_pushable_task(rq, p);
+ schedtune_dequeue_task(p, cpu_of(rq));
}
/*
@@ -1612,41 +1606,6 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
#endif
}
-#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
-static void sched_rt_update_capacity_req(struct rq *rq)
-{
- u64 total, used, age_stamp, avg;
- s64 delta;
-
- if (!sched_freq())
- return;
-
- sched_avg_update(rq);
- /*
- * Since we're reading these variables without serialization make sure
- * we read them once before doing sanity checks on them.
- */
- age_stamp = READ_ONCE(rq->age_stamp);
- avg = READ_ONCE(rq->rt_avg);
- delta = rq_clock(rq) - age_stamp;
-
- if (unlikely(delta < 0))
- delta = 0;
-
- total = sched_avg_period() + delta;
-
- used = div_u64(avg, total);
- if (unlikely(used > SCHED_CAPACITY_SCALE))
- used = SCHED_CAPACITY_SCALE;
-
- set_rt_cpu_capacity(rq->cpu, 1, (unsigned long)(used));
-}
-#else
-static inline void sched_rt_update_capacity_req(struct rq *rq)
-{ }
-
-#endif
-
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
struct rt_rq *rt_rq)
{
@@ -1715,17 +1674,8 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
if (prev->sched_class == &rt_sched_class)
update_curr_rt(rq);
- if (!rt_rq->rt_queued) {
- /*
- * The next task to be picked on this rq will have a lower
- * priority than rt tasks so we can spend some time to update
- * the capacity used by rt tasks based on the last activity.
- * This value will be the used as an estimation of the next
- * activity.
- */
- sched_rt_update_capacity_req(rq);
+ if (!rt_rq->rt_queued)
return NULL;
- }
put_prev_task(rq, prev);
@@ -2143,160 +2093,166 @@ static void push_rt_tasks(struct rq *rq)
}
#ifdef HAVE_RT_PUSH_IPI
+
/*
- * The search for the next cpu always starts at rq->cpu and ends
- * when we reach rq->cpu again. It will never return rq->cpu.
- * This returns the next cpu to check, or nr_cpu_ids if the loop
- * is complete.
+ * When a high priority task schedules out from a CPU and a lower priority
+ * task is scheduled in, a check is made to see if there's any RT tasks
+ * on other CPUs that are waiting to run because a higher priority RT task
+ * is currently running on its CPU. In this case, the CPU with multiple RT
+ * tasks queued on it (overloaded) needs to be notified that a CPU has opened
+ * up that may be able to run one of its non-running queued RT tasks.
+ *
+ * All CPUs with overloaded RT tasks need to be notified as there is currently
+ * no way to know which of these CPUs have the highest priority task waiting
+ * to run. Instead of trying to take a spinlock on each of these CPUs,
+ * which has shown to cause large latency when done on machines with many
+ * CPUs, sending an IPI to the CPUs to have them push off the overloaded
+ * RT tasks waiting to run.
+ *
+ * Just sending an IPI to each of the CPUs is also an issue, as on large
+ * count CPU machines, this can cause an IPI storm on a CPU, especially
+ * if it's the only CPU with multiple RT tasks queued, and a large number
+ * of CPUs scheduling a lower priority task at the same time.
+ *
+ * Each root domain has its own irq work function that can iterate over
+ * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
+ * tasks must be checked if there's one or many CPUs that are lowering
+ * their priority, there's a single irq work iterator that will try to
+ * push off RT tasks that are waiting to run.
+ *
+ * When a CPU schedules a lower priority task, it will kick off the
+ * irq work iterator that will jump to each CPU with overloaded RT tasks.
+ * As it only takes the first CPU that schedules a lower priority task
+ * to start the process, the rto_start variable is incremented and if
+ * the atomic result is one, then that CPU will try to take the rto_lock.
+ * This prevents high contention on the lock as the process handles all
+ * CPUs scheduling lower priority tasks.
+ *
+ * All CPUs that are scheduling a lower priority task will increment the
+ * rt_loop_next variable. This will make sure that the irq work iterator
+ * checks all RT overloaded CPUs whenever a CPU schedules a new lower
+ * priority task, even if the iterator is in the middle of a scan. Incrementing
+ * the rt_loop_next will cause the iterator to perform another scan.
*
- * rq->rt.push_cpu holds the last cpu returned by this function,
- * or if this is the first instance, it must hold rq->cpu.
*/
static int rto_next_cpu(struct rq *rq)
{
- int prev_cpu = rq->rt.push_cpu;
+ struct root_domain *rd = rq->rd;
+ int next;
int cpu;
- cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
-
/*
- * If the previous cpu is less than the rq's CPU, then it already
- * passed the end of the mask, and has started from the beginning.
- * We end if the next CPU is greater or equal to rq's CPU.
+ * When starting the IPI RT pushing, the rto_cpu is set to -1,
+ * rto_next_cpu() will simply return the first CPU found in
+ * the rto_mask.
+ *
+ * If rto_next_cpu() is called while rto_cpu is a valid cpu, it
+ * will return the next CPU found in the rto_mask.
+ *
+ * If there are no more CPUs left in the rto_mask, then a check is made
+ * against rto_loop and rto_loop_next. rto_loop is only updated with
+ * the rto_lock held, but any CPU may increment the rto_loop_next
+ * without any locking.
*/
- if (prev_cpu < rq->cpu) {
- if (cpu >= rq->cpu)
- return nr_cpu_ids;
+ for (;;) {
- } else if (cpu >= nr_cpu_ids) {
- /*
- * We passed the end of the mask, start at the beginning.
- * If the result is greater or equal to the rq's CPU, then
- * the loop is finished.
- */
- cpu = cpumask_first(rq->rd->rto_mask);
- if (cpu >= rq->cpu)
- return nr_cpu_ids;
- }
- rq->rt.push_cpu = cpu;
+ /* When rto_cpu is -1 this acts like cpumask_first() */
+ cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
- /* Return cpu to let the caller know if the loop is finished or not */
- return cpu;
-}
+ rd->rto_cpu = cpu;
-static int find_next_push_cpu(struct rq *rq)
-{
- struct rq *next_rq;
- int cpu;
+ if (cpu < nr_cpu_ids)
+ return cpu;
- while (1) {
- cpu = rto_next_cpu(rq);
- if (cpu >= nr_cpu_ids)
- break;
- next_rq = cpu_rq(cpu);
+ rd->rto_cpu = -1;
+
+ /*
+ * ACQUIRE ensures we see the @rto_mask changes
+ * made prior to the @next value observed.
+ *
+ * Matches WMB in rt_set_overload().
+ */
+ next = atomic_read_acquire(&rd->rto_loop_next);
- /* Make sure the next rq can push to this rq */
- if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
+ if (rd->rto_loop == next)
break;
+
+ rd->rto_loop = next;
}
- return cpu;
+ return -1;
}
-#define RT_PUSH_IPI_EXECUTING 1
-#define RT_PUSH_IPI_RESTART 2
+static inline bool rto_start_trylock(atomic_t *v)
+{
+ return !atomic_cmpxchg_acquire(v, 0, 1);
+}
-static void tell_cpu_to_push(struct rq *rq)
+static inline void rto_start_unlock(atomic_t *v)
{
- int cpu;
+ atomic_set_release(v, 0);
+}
- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
- raw_spin_lock(&rq->rt.push_lock);
- /* Make sure it's still executing */
- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
- /*
- * Tell the IPI to restart the loop as things have
- * changed since it started.
- */
- rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
- raw_spin_unlock(&rq->rt.push_lock);
- return;
- }
- raw_spin_unlock(&rq->rt.push_lock);
- }
+static void tell_cpu_to_push(struct rq *rq)
+{
+ int cpu = -1;
- /* When here, there's no IPI going around */
+ /* Keep the loop going if the IPI is currently active */
+ atomic_inc(&rq->rd->rto_loop_next);
- rq->rt.push_cpu = rq->cpu;
- cpu = find_next_push_cpu(rq);
- if (cpu >= nr_cpu_ids)
+ /* Only one CPU can initiate a loop at a time */
+ if (!rto_start_trylock(&rq->rd->rto_loop_start))
return;
- rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
+ raw_spin_lock(&rq->rd->rto_lock);
- irq_work_queue_on(&rq->rt.push_work, cpu);
+ /*
+ * The rto_cpu is updated under the lock, if it has a valid cpu
+ * then the IPI is still running and will continue due to the
+ * update to loop_next, and nothing needs to be done here.
+ * Otherwise it is finishing up and an ipi needs to be sent.
+ */
+ if (rq->rd->rto_cpu < 0)
+ cpu = rto_next_cpu(rq);
+
+ raw_spin_unlock(&rq->rd->rto_lock);
+
+ rto_start_unlock(&rq->rd->rto_loop_start);
+
+ if (cpu >= 0)
+ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
/* Called from hardirq context */
-static void try_to_push_tasks(void *arg)
+void rto_push_irq_work_func(struct irq_work *work)
{
- struct rt_rq *rt_rq = arg;
- struct rq *rq, *src_rq;
- int this_cpu;
+ struct rq *rq;
int cpu;
- this_cpu = rt_rq->push_cpu;
-
- /* Paranoid check */
- BUG_ON(this_cpu != smp_processor_id());
-
- rq = cpu_rq(this_cpu);
- src_rq = rq_of_rt_rq(rt_rq);
+ rq = this_rq();
-again:
+ /*
+ * We do not need to grab the lock to check for has_pushable_tasks.
+ * When it gets updated, a check is made if a push is possible.
+ */
if (has_pushable_tasks(rq)) {
raw_spin_lock(&rq->lock);
- push_rt_task(rq);
+ push_rt_tasks(rq);
raw_spin_unlock(&rq->lock);
}
- /* Pass the IPI to the next rt overloaded queue */
- raw_spin_lock(&rt_rq->push_lock);
- /*
- * If the source queue changed since the IPI went out,
- * we need to restart the search from that CPU again.
- */
- if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
- rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
- rt_rq->push_cpu = src_rq->cpu;
- }
+ raw_spin_lock(&rq->rd->rto_lock);
- cpu = find_next_push_cpu(src_rq);
+ /* Pass the IPI to the next rt overloaded queue */
+ cpu = rto_next_cpu(rq);
- if (cpu >= nr_cpu_ids)
- rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
- raw_spin_unlock(&rt_rq->push_lock);
+ raw_spin_unlock(&rq->rd->rto_lock);
- if (cpu >= nr_cpu_ids)
+ if (cpu < 0)
return;
- /*
- * It is possible that a restart caused this CPU to be
- * chosen again. Don't bother with an IPI, just see if we
- * have more to push.
- */
- if (unlikely(cpu == rq->cpu))
- goto again;
-
/* Try the next RT overloaded CPU */
- irq_work_queue_on(&rt_rq->push_work, cpu);
-}
-
-static void push_irq_work_func(struct irq_work *work)
-{
- struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
-
- try_to_push_tasks(rt_rq);
+ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */
@@ -2558,9 +2514,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
update_curr_rt(rq);
- if (rq->rt.rt_nr_running)
- sched_rt_update_capacity_req(rq);
-
watchdog(rq, p);
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cc5ae5ddee6b..ca2294d06f44 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -532,7 +532,7 @@ static inline int rt_bandwidth_enabled(void)
}
/* RT IPI pull logic requires IRQ_WORK */
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif
@@ -553,12 +553,6 @@ struct rt_rq {
unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
-#ifdef HAVE_RT_PUSH_IPI
- int push_flags;
- int push_cpu;
- struct irq_work push_work;
- raw_spinlock_t push_lock;
-#endif
#endif /* CONFIG_SMP */
int rt_queued;
@@ -651,6 +645,19 @@ struct root_domain {
struct dl_bw dl_bw;
struct cpudl cpudl;
+#ifdef HAVE_RT_PUSH_IPI
+ /*
+ * For IPI pull requests, loop across the rto_mask.
+ */
+ struct irq_work rto_push_work;
+ raw_spinlock_t rto_lock;
+ /* These are only updated and read within rto_lock */
+ int rto_loop;
+ int rto_cpu;
+ /* These atomics are updated outside of a lock */
+ atomic_t rto_loop_next;
+ atomic_t rto_loop_start;
+#endif
/*
* The "RT overload" flag: it gets set if a CPU has more than
* one runnable RT task.
@@ -667,6 +674,9 @@ struct root_domain {
extern struct root_domain def_root_domain;
+#ifdef HAVE_RT_PUSH_IPI
+extern void rto_push_irq_work_func(struct irq_work *work);
+#endif
#endif /* CONFIG_SMP */
/*
@@ -1247,7 +1257,7 @@ static inline int cpu_min_power_cost(int cpu)
return cpu_rq(cpu)->cluster->min_power_cost;
}
-static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
{
return div64_u64(cycles, period);
}
@@ -2422,64 +2432,6 @@ static inline unsigned long cpu_util_freq(int cpu)
#endif
-#ifdef CONFIG_CPU_FREQ_GOV_SCHED
-#define capacity_max SCHED_CAPACITY_SCALE
-extern unsigned int capacity_margin;
-extern struct static_key __sched_freq;
-
-static inline bool sched_freq(void)
-{
- return static_key_false(&__sched_freq);
-}
-
-/*
- * sched_capacity_reqs expects capacity requests to be normalised.
- * All capacities should sum to the range of 0-1024.
- */
-DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
-void update_cpu_capacity_request(int cpu, bool request);
-
-static inline void set_cfs_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{
- struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
-
- if (scr->cfs != capacity) {
- scr->cfs = capacity;
- update_cpu_capacity_request(cpu, request);
- }
-}
-
-static inline void set_rt_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{
- if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) {
- per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity;
- update_cpu_capacity_request(cpu, request);
- }
-}
-
-static inline void set_dl_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{
- if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) {
- per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity;
- update_cpu_capacity_request(cpu, request);
- }
-}
-#else
-#define sched_freq() false
-static inline void set_cfs_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{ }
-static inline void set_rt_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{ }
-static inline void set_dl_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{ }
-#endif
-
#ifdef CONFIG_SCHED_HMP
/*
* HMP and EAS are orthogonal. Hopefully the compiler just elides out all code
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3fbe2765f307..bc4ca30ddc21 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -529,13 +529,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
{
- .procname = "sched_initial_task_util",
- .data = &sysctl_sched_initial_task_util,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "sched_cstate_aware",
.data = &sysctl_sched_cstate_aware,
.maxlen = sizeof(unsigned int),
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 738f3467d169..fc86fdcce932 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -70,6 +70,10 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
tk->xtime_sec++;
}
+ while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
+ tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+ tk->raw_sec++;
+ }
}
static inline struct timespec64 tk_xtime(struct timekeeper *tk)
@@ -277,18 +281,19 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
/* Go back from cycles -> shifted ns */
tk->xtime_interval = (u64) interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
- tk->raw_interval =
- ((u64) interval * clock->mult) >> clock->shift;
+ tk->raw_interval = interval * clock->mult;
/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
int shift_change = clock->shift - old_clock->shift;
- if (shift_change < 0)
+ if (shift_change < 0) {
tk->tkr_mono.xtime_nsec >>= -shift_change;
- else
+ tk->tkr_raw.xtime_nsec >>= -shift_change;
+ } else {
tk->tkr_mono.xtime_nsec <<= shift_change;
+ tk->tkr_raw.xtime_nsec <<= shift_change;
+ }
}
- tk->tkr_raw.xtime_nsec = 0;
tk->tkr_mono.shift = clock->shift;
tk->tkr_raw.shift = clock->shift;
@@ -617,9 +622,6 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
nsec = (u32) tk->wall_to_monotonic.tv_nsec;
tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
- /* Update the monotonic raw base */
- tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
-
/*
* The sum of the nanoseconds portions of xtime and
* wall_to_monotonic can be greater/equal one second. Take
@@ -629,6 +631,11 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
if (nsec >= NSEC_PER_SEC)
seconds++;
tk->ktime_sec = seconds;
+
+ /* Update the monotonic raw base */
+ seconds = tk->raw_sec;
+ nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift);
+ tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
}
/* must hold timekeeper_lock */
@@ -670,7 +677,6 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
static void timekeeping_forward_now(struct timekeeper *tk)
{
cycle_t cycle_now, delta;
- s64 nsec;
cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
@@ -682,10 +688,13 @@ static void timekeeping_forward_now(struct timekeeper *tk)
/* If arch requires, add in get_arch_timeoffset() */
tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
- tk_normalize_xtime(tk);
- nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
- timespec64_add_ns(&tk->raw_time, nsec);
+ tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+
+ /* If arch requires, add in get_arch_timeoffset() */
+ tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
+
+ tk_normalize_xtime(tk);
}
/**
@@ -1179,19 +1188,18 @@ int timekeeping_notify(struct clocksource *clock)
void getrawmonotonic64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
- struct timespec64 ts64;
unsigned long seq;
s64 nsecs;
do {
seq = read_seqcount_begin(&tk_core.seq);
+ ts->tv_sec = tk->raw_sec;
nsecs = timekeeping_get_ns(&tk->tkr_raw);
- ts64 = tk->raw_time;
} while (read_seqcount_retry(&tk_core.seq, seq));
- timespec64_add_ns(&ts64, nsecs);
- *ts = ts64;
+ ts->tv_nsec = 0;
+ timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic64);
@@ -1315,8 +1323,7 @@ void __init timekeeping_init(void)
tk_setup_internals(tk, clock);
tk_set_xtime(tk, &now);
- tk->raw_time.tv_sec = 0;
- tk->raw_time.tv_nsec = 0;
+ tk->raw_sec = 0;
if (boot.tv_sec == 0 && boot.tv_nsec == 0)
boot = tk_xtime(tk);
@@ -1796,7 +1803,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
unsigned int *clock_set)
{
cycle_t interval = tk->cycle_interval << shift;
- u64 raw_nsecs;
+ u64 snsec_per_sec;
/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
@@ -1811,14 +1818,12 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
- raw_nsecs = (u64)tk->raw_interval << shift;
- raw_nsecs += tk->raw_time.tv_nsec;
- if (raw_nsecs >= NSEC_PER_SEC) {
- u64 raw_secs = raw_nsecs;
- raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- tk->raw_time.tv_sec += raw_secs;
+ tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+ snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+ while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+ tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+ tk->raw_sec++;
}
- tk->raw_time.tv_nsec = raw_nsecs;
/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 80b5dbfd187d..e56ba414839c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -70,6 +70,7 @@ enum {
* attach_mutex to avoid changing binding state while
* worker_attach_to_pool() is in progress.
*/
+ POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
/* worker flags */
@@ -167,7 +168,6 @@ struct worker_pool {
/* L: hash of busy workers */
/* see manage_workers() for details on the two manager mutexes */
- struct mutex manager_arb; /* manager arbitration */
struct worker *manager; /* L: purely informational */
struct mutex attach_mutex; /* attach/detach exclusion */
struct list_head workers; /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -812,7 +813,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
- bool managing = mutex_is_locked(&pool->manager_arb);
+ bool managing = pool->flags & POOL_MANAGER_ACTIVE;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;
@@ -1964,24 +1965,17 @@ static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- /*
- * Anyone who successfully grabs manager_arb wins the arbitration
- * and becomes the manager. mutex_trylock() on pool->manager_arb
- * failure while holding pool->lock reliably indicates that someone
- * else is managing the pool and the worker which failed trylock
- * can proceed to executing work items. This means that anyone
- * grabbing manager_arb is responsible for actually performing
- * manager duties. If manager_arb is grabbed and released without
- * actual management, the pool may stall indefinitely.
- */
- if (!mutex_trylock(&pool->manager_arb))
+ if (pool->flags & POOL_MANAGER_ACTIVE)
return false;
+
+ pool->flags |= POOL_MANAGER_ACTIVE;
pool->manager = worker;
maybe_create_worker(pool);
pool->manager = NULL;
- mutex_unlock(&pool->manager_arb);
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+ wake_up(&wq_manager_wait);
return true;
}
@@ -3141,7 +3135,6 @@ static int init_worker_pool(struct worker_pool *pool)
setup_timer(&pool->mayday_timer, pool_mayday_timeout,
(unsigned long)pool);
- mutex_init(&pool->manager_arb);
mutex_init(&pool->attach_mutex);
INIT_LIST_HEAD(&pool->workers);
@@ -3211,13 +3204,15 @@ static void put_unbound_pool(struct worker_pool *pool)
hash_del(&pool->hash_node);
/*
- * Become the manager and destroy all workers. Grabbing
- * manager_arb prevents @pool's workers from blocking on
- * attach_mutex.
+ * Become the manager and destroy all workers. This prevents
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
*/
- mutex_lock(&pool->manager_arb);
-
spin_lock_irq(&pool->lock);
+ wait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3231,8 +3226,6 @@ static void put_unbound_pool(struct worker_pool *pool)
if (pool->detach_completion)
wait_for_completion(pool->detach_completion);
- mutex_unlock(&pool->manager_arb);
-
/* shut down the timers */
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 45215870ac6c..3fa9c146fccb 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
struct worker_pool;
@@ -59,7 +60,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
- if (current->flags & PF_WQ_WORKER)
+ if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 554522934c44..4fa2e54b3f59 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -227,7 +227,7 @@ next_op:
hdr = 2;
/* Extract a tag from the data */
- if (unlikely(dp >= datalen - 1))
+ if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@@ -273,7 +273,7 @@ next_op:
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
- if (unlikely(dp >= datalen - n))
+ if (unlikely(n > datalen - dp))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
@@ -283,6 +283,9 @@ next_op:
if (unlikely(len > datalen - dp))
goto data_overrun_error;
}
+ } else {
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
}
if (flags & FLAG_CONS) {
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0b119c..5cd093589c5a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
- /* Otherwise we can just insert a new node ahead of the old
- * one.
+ /* Otherwise all the old leaves cluster in the same slot, but
+ * the new leaf wants to go into a different slot - so we
+ * create a new node (n0) to hold the new leaf and a pointer to
+ * a new node (n1) holding all the old leaves.
+ *
+ * This can be done by falling through to the node splitting
+ * path.
*/
- goto present_leaves_cluster_but_not_new_leaf;
+ pr_devel("present leaves cluster but not new leaf\n");
}
split_node:
pr_devel("split node\n");
- /* We need to split the current node; we know that the node doesn't
- * simply contain a full set of leaves that cluster together (it
- * contains meta pointers and/or non-clustering leaves).
+ /* We need to split the current node. The node must contain anything
+ * from a single leaf (in the one leaf case, this leaf will cluster
+ * with the new leaf) and the rest meta-pointers, to all leaves, some
+ * of which may cluster.
+ *
+ * It won't contain the case in which all the current leaves plus the
+ * new leaves want to cluster in the same slot.
*
* We need to expel at least two leaves out of a set consisting of the
- * leaves in the node and the new leaf.
+ * leaves in the node and the new leaf. The current meta pointers can
+ * just be copied as they shouldn't cluster with any of the leaves.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
-present_leaves_cluster_but_not_new_leaf:
- /* All the old leaves cluster in the same slot, but the new leaf wants
- * to go into a different slot, so we create a new node to hold the new
- * leaf and a pointer to a new node holding all the old leaves.
- */
- pr_devel("present leaves cluster but not new leaf\n");
-
- new_n0->back_pointer = node->back_pointer;
- new_n0->parent_slot = node->parent_slot;
- new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
- new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
- new_n1->parent_slot = edit->segment_cache[0];
- new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
- edit->adjust_count_on = new_n0;
-
- for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
- new_n1->slots[i] = node->slots[i];
-
- new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
- edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
- edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
- edit->set[0].to = assoc_array_node_to_ptr(new_n0);
- edit->excised_meta[0] = assoc_array_node_to_ptr(node);
- pr_devel("<--%s() = ok [insert node before]\n", __func__);
- return true;
-
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index e24388a863a7..468fb7cd1221 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -26,6 +26,7 @@
* however I decided to publish this code under the plain GPL.
*/
+#include <linux/sched.h>
#include <linux/string.h>
#include "mpi-internal.h"
#include "longlong.h"
@@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
}
e <<= 1;
c--;
+ cond_resched();
}
i--;
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 86374c1c49a4..841191061816 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -65,14 +65,19 @@ static ssize_t trigger_request_store(struct device *dev,
release_firmware(test_firmware);
test_firmware = NULL;
rc = request_firmware(&test_firmware, name, dev);
- if (rc)
+ if (rc) {
pr_info("load of '%s' failed: %d\n", name, rc);
- pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
+ goto out;
+ }
+ pr_info("loaded: %zu\n", test_firmware->size);
+ rc = count;
+
+out:
mutex_unlock(&test_fw_mutex);
kfree(name);
- return count;
+ return rc;
}
static DEVICE_ATTR_WO(trigger_request);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6c6f5ccfcda1..8f3769ec8575 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1304,17 +1304,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
VM_BUG_ON_PAGE(!PageHead(page), page);
if (flags & FOLL_TOUCH) {
pmd_t _pmd;
- /*
- * We should set the dirty bit only for FOLL_WRITE but
- * for now the dirty bit in the pmd is meaningless.
- * And if the dirty bit will become meaningful and
- * we'll only set it with FOLL_WRITE, an atomic
- * set_bit will be required on the pmd to set the
- * young bit, instead of the current set_pmd_at.
- */
- _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+ _pmd = pmd_mkyoung(*pmd);
+ if (flags & FOLL_WRITE)
+ _pmd = pmd_mkdirty(_pmd);
if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
- pmd, _pmd, 1))
+ pmd, _pmd, flags & FOLL_WRITE))
update_mmu_cache_pmd(vma, addr, pmd);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
diff --git a/mm/madvise.c b/mm/madvise.c
index c154e1076303..b04f2d26cdb8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -223,15 +223,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
{
struct file *file = vma->vm_file;
+ *prev = vma;
#ifdef CONFIG_SWAP
if (!file) {
- *prev = vma;
force_swapin_readahead(vma, start, end);
return 0;
}
if (shmem_mapping(file->f_mapping)) {
- *prev = vma;
force_shm_swapin_readahead(vma, start, end,
file->f_mapping);
return 0;
@@ -246,7 +245,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
return 0;
}
- *prev = vma;
start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
if (end > vma->vm_end)
end = vma->vm_end;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 915c60258935..2ea77b967709 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,28 +286,37 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+
+/*
+ * Determine how many pages need to be initialized during early boot
+ * (non-deferred initialization).
+ * The value of first_deferred_pfn will be set later, once non-deferred pages
+ * are initialized, but for now set it to ULONG_MAX.
+ */
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
- unsigned long max_initialise;
- unsigned long reserved_lowmem;
+ phys_addr_t start_addr, end_addr;
+ unsigned long max_pgcnt;
+ unsigned long reserved;
/*
* Initialise at least 2G of a node but also take into account that
* two large system hashes that can take up 1GB for 0.25TB/node.
*/
- max_initialise = max(2UL << (30 - PAGE_SHIFT),
- (pgdat->node_spanned_pages >> 8));
+ max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
+ (pgdat->node_spanned_pages >> 8));
/*
* Compensate the all the memblock reservations (e.g. crash kernel)
* from the initial estimation to make sure we will initialize enough
* memory to boot.
*/
- reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
- pgdat->node_start_pfn + max_initialise);
- max_initialise += reserved_lowmem;
+ start_addr = PFN_PHYS(pgdat->node_start_pfn);
+ end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
+ reserved = memblock_reserved_memory_within(start_addr, end_addr);
+ max_pgcnt += PHYS_PFN(reserved);
- pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
+ pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
pgdat->first_deferred_pfn = ULONG_MAX;
}
@@ -343,7 +352,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
return true;
/* Initialise at least 2G of the highest zone */
(*nr_initialised)++;
- if ((*nr_initialised > pgdat->static_init_size) &&
+ if ((*nr_initialised > pgdat->static_init_pgcnt) &&
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
pgdat->first_deferred_pfn = pfn;
return false;
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 916accfec86a..f02ad1cc7d24 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -103,7 +103,6 @@ struct page_ext *lookup_page_ext(struct page *page)
struct page_ext *base;
base = NODE_DATA(page_to_nid(page))->node_page_ext;
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
@@ -115,7 +114,6 @@ struct page_ext *lookup_page_ext(struct page *page)
*/
if (unlikely(!base))
return NULL;
-#endif
offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
MAX_ORDER_NR_PAGES);
return base + offset;
@@ -180,7 +178,6 @@ struct page_ext *lookup_page_ext(struct page *page)
{
unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn);
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
@@ -192,7 +189,6 @@ struct page_ext *lookup_page_ext(struct page *page)
*/
if (!section->page_ext)
return NULL;
-#endif
return section->page_ext + pfn;
}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 29f2f8b853ae..c2cbd2620169 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -142,8 +142,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
do {
next = hugetlb_entry_end(h, addr, end);
pte = huge_pte_offset(walk->mm, addr & hmask);
- if (pte && walk->hugetlb_entry)
+
+ if (pte)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
+ else if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, walk);
+
if (err)
break;
} while (addr = next, addr != end);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 5e4199d5a388..01abb6431fd9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
dev->name);
vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
}
+ if (event == NETDEV_DOWN &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
struct net_device *tmp;
LIST_HEAD(close_list);
- if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
- vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
-
/* Put all VLANs for this dev in the down state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;
diff --git a/net/9p/client.c b/net/9p/client.c
index f5feac4ff4ec..3ff26eb1ea20 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
}
again:
/* Wait for the response */
- err = wait_event_interruptible(*req->wq,
- req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
/*
* Make sure our req is coherent with regard to updates in other
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 6e70ddb158b4..2ddeecca5b12 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -290,8 +290,8 @@ req_retry:
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
- err = wait_event_interruptible(*chan->vc_wq,
- chan->ring_bufs_avail);
+ err = wait_event_killable(*chan->vc_wq,
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
return err;
@@ -331,7 +331,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
* Other zc request to finish here
*/
if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
- err = wait_event_interruptible(vp_wq,
+ err = wait_event_killable(vp_wq,
(atomic_read(&vp_pinned) < chan->p9_max_pages));
if (err == -ERESTARTSYS)
return err;
@@ -475,8 +475,8 @@ req_retry_pinned:
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
- err = wait_event_interruptible(*chan->vc_wq,
- chan->ring_bufs_avail);
+ err = wait_event_killable(*chan->vc_wq,
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
goto err_out;
@@ -493,8 +493,7 @@ req_retry_pinned:
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
- err = wait_event_interruptible(*req->wq,
- req->status >= REQ_STATUS_RCVD);
+ err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
/*
* Non kernel buffers are pinned, unpin them
*/
diff --git a/net/core/dev.c b/net/core/dev.c
index 8fc44bf48b9c..b26aaa5729d9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1113,9 +1113,8 @@ static int dev_alloc_name_ns(struct net *net,
return ret;
}
-static int dev_get_valid_name(struct net *net,
- struct net_device *dev,
- const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+ const char *name)
{
BUG_ON(!net);
@@ -1131,6 +1130,7 @@ static int dev_get_valid_name(struct net *net,
return 0;
}
+EXPORT_SYMBOL(dev_get_valid_name);
/**
* dev_change_name - change name of a device
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fedcee8263b6..c3ec257493bc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4255,6 +4255,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
if (!xnet)
return;
+ ipvs_reset(skb);
skb_orphan(skb);
skb->mark = 0;
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index e217f17997a4..6eb2bbf9873b 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
sk_daddr_set(newsk, ireq->ir_rmt_addr);
sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
newinet->inet_saddr = ireq->ir_loc_addr;
- newinet->inet_opt = ireq->opt;
- ireq->opt = NULL;
+ RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->inet_id = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-
+ if (*own_req)
+ ireq->ireq_opt = NULL;
+ else
+ newinet->inet_opt = NULL;
return newsk;
exit_overflow:
@@ -441,6 +443,7 @@ exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
+ newinet->inet_opt = NULL;
inet_csk_prepare_forced_close(newsk);
dccp_done(newsk);
goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
ireq->ir_rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq->opt);
+ ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}
@@ -546,7 +549,7 @@ out:
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
- kfree(inet_rsk(req)->opt);
+ kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
void dccp_syn_ack_timeout(const struct request_sock *req)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index ff7736f7ff42..fc0c09e770e6 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,12 +1,13 @@
config HAVE_NET_DSA
def_bool y
- depends on NETDEVICES && !S390
+ depends on INET && NETDEVICES && !S390
# Drivers must select NET_DSA and the appropriate tagging format
config NET_DSA
tristate "Distributed Switch Architecture"
- depends on HAVE_NET_DSA && NET_SWITCHDEV
+ depends on HAVE_NET_DSA
+ select NET_SWITCHDEV
select PHYLIB
---help---
Say Y if you want to enable support for the hardware switches supported
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index f2a71025a770..22377c8ff14b 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err)
int ihl = ip_hdrlen(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
+ if (err)
+ goto out;
+
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, ihl);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6cc3e1d602fb..5f3b81941a6f 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -2012,7 +2012,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
buf = NULL;
req_inet = inet_rsk(req);
- opt = xchg(&req_inet->opt, opt);
+ opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
if (opt)
kfree_rcu(opt, rcu);
@@ -2034,11 +2034,13 @@ req_setattr_failure:
* values on failure.
*
*/
-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
{
+ struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
int hdr_delta = 0;
- struct ip_options_rcu *opt = *opt_ptr;
+ if (!opt || opt->opt.cipso == 0)
+ return 0;
if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
u8 cipso_len;
u8 cipso_off;
@@ -2100,14 +2102,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
*/
void cipso_v4_sock_delattr(struct sock *sk)
{
- int hdr_delta;
- struct ip_options_rcu *opt;
struct inet_sock *sk_inet;
+ int hdr_delta;
sk_inet = inet_sk(sk);
- opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
- if (!opt || opt->opt.cipso == 0)
- return;
hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2127,15 +2125,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
*/
void cipso_v4_req_delattr(struct request_sock *req)
{
- struct ip_options_rcu *opt;
- struct inet_request_sock *req_inet;
-
- req_inet = inet_rsk(req);
- opt = req_inet->opt;
- if (!opt || opt->opt.cipso == 0)
- return;
-
- cipso_v4_delopt(&req_inet->opt);
+ cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
}
/**
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 81fcff83d309..6640547df8f5 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -419,9 +419,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct net *net = read_pnet(&ireq->ireq_net);
- struct ip_options_rcu *opt = ireq->opt;
+ struct ip_options_rcu *opt;
struct rtable *rt;
+ opt = ireq_opt_deref(ireq);
+
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -455,10 +457,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct flowi4 *fl4;
struct rtable *rt;
+ opt = rcu_dereference(ireq->ireq_opt);
fl4 = &newinet->cork.fl.u.ip4;
- rcu_read_lock();
- opt = rcu_dereference(newinet->inet_opt);
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -471,13 +472,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
- rcu_read_unlock();
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
- rcu_read_unlock();
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f300d1cbfa91..097a1243c16c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -808,6 +808,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
{
struct ip_mreqn mreq;
struct net_device *dev = NULL;
+ int midx;
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
@@ -852,11 +853,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
err = -EADDRNOTAVAIL;
if (!dev)
break;
+
+ midx = l3mdev_master_ifindex(dev);
+
dev_put(dev);
err = -EINVAL;
if (sk->sk_bound_dev_if &&
- mreq.imr_ifindex != sk->sk_bound_dev_if)
+ mreq.imr_ifindex != sk->sk_bound_dev_if &&
+ (!midx || midx != sk->sk_bound_dev_if))
break;
inet->mc_index = mreq.imr_ifindex;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index a09fb0dec725..486b283a6cd1 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -129,42 +129,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
static int ipip_err(struct sk_buff *skb, u32 info)
{
-/* All the routers (except for Linux) return only
- 8 bytes of packet payload. It means, that precise relaying of
- ICMP in the real Internet is absolutely infeasible.
- */
+ /* All the routers (except for Linux) return only
+ 8 bytes of packet payload. It means, that precise relaying of
+ ICMP in the real Internet is absolutely infeasible.
+ */
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
const struct iphdr *iph = (const struct iphdr *)skb->data;
- struct ip_tunnel *t;
- int err;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
+ struct ip_tunnel *t;
+ int err = 0;
+
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ switch (code) {
+ case ICMP_SR_FAILED:
+ /* Impossible event. */
+ goto out;
+ default:
+ /* All others are translated to HOST_UNREACH.
+ * rfc2003 contains "deep thoughts" about NET_UNREACH,
+ * I believe they are just ether pollution. --ANK
+ */
+ break;
+ }
+ break;
+
+ case ICMP_TIME_EXCEEDED:
+ if (code != ICMP_EXC_TTL)
+ goto out;
+ break;
+
+ case ICMP_REDIRECT:
+ break;
+
+ default:
+ goto out;
+ }
- err = -ENOENT;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->daddr, iph->saddr, 0);
- if (!t)
+ if (!t) {
+ err = -ENOENT;
goto out;
+ }
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, IPPROTO_IPIP, 0);
- err = 0;
+ ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
+ iph->protocol, 0);
goto out;
}
if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
- IPPROTO_IPIP, 0);
- err = 0;
+ ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
goto out;
}
- if (t->parms.iph.daddr == 0)
+ if (t->parms.iph.daddr == 0) {
+ err = -ENOENT;
goto out;
+ }
- err = 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index a2e1142145df..57bbcd5b650a 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -357,7 +357,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
/* We throwed the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
*/
- ireq->opt = tcp_v4_save_options(skb);
+ RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
if (security_inet_conn_request(sk, skb, req)) {
reqsk_free(req);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0047b151e8e8..277e502ff253 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4943,7 +4943,7 @@ static void tcp_check_space(struct sock *sk)
if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
/* pairs with tcp_poll() */
- smp_mb__after_atomic();
+ smp_mb();
if (sk->sk_socket &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
tcp_new_space(sk);
@@ -6107,7 +6107,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
struct inet_request_sock *ireq = inet_rsk(req);
kmemcheck_annotate_bitfield(ireq, flags);
- ireq->opt = NULL;
+ ireq->ireq_opt = NULL;
atomic64_set(&ireq->ir_cookie, 0);
ireq->ireq_state = TCP_NEW_SYN_RECV;
write_pnet(&ireq->ireq_net, sock_net(sk_listener));
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3845ab04a9b4..30d4e38a6241 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -860,7 +860,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq->opt);
+ ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}
@@ -872,7 +872,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
*/
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
- kfree(inet_rsk(req)->opt);
+ kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
@@ -1201,7 +1201,7 @@ static void tcp_v4_init_req(struct request_sock *req,
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
ireq->no_srccheck = inet_sk(sk_listener)->transparent;
- ireq->opt = tcp_v4_save_options(skb);
+ RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1297,10 +1297,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
ireq = inet_rsk(req);
sk_daddr_set(newsk, ireq->ir_rmt_addr);
sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
- newinet->inet_saddr = ireq->ir_loc_addr;
- inet_opt = ireq->opt;
- rcu_assign_pointer(newinet->inet_opt, inet_opt);
- ireq->opt = NULL;
+ newinet->inet_saddr = ireq->ir_loc_addr;
+ inet_opt = rcu_dereference(ireq->ireq_opt);
+ RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->rcv_tos = ip_hdr(skb)->tos;
@@ -1348,9 +1347,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
- if (*own_req)
+ if (likely(*own_req)) {
tcp_move_syn(newtp, req);
-
+ ireq->ireq_opt = NULL;
+ } else {
+ newinet->inet_opt = NULL;
+ }
return newsk;
exit_overflow:
@@ -1361,6 +1363,7 @@ exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
+ newinet->inet_opt = NULL;
inet_csk_prepare_forced_close(newsk);
tcp_done(newsk);
goto exit;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4e88f93f71c8..7d82c172db78 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1951,6 +1951,7 @@ static int tcp_mtu_probe(struct sock *sk)
nskb->ip_summed = skb->ip_summed;
tcp_insert_write_queue_before(nskb, skb, sk);
+ tcp_highest_sack_replace(sk, skb, nskb);
len = 0;
tcp_for_write_queue_from_safe(skb, next, sk) {
@@ -2464,7 +2465,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
- tcp_highest_sack_combine(sk, next_skb, skb);
+ tcp_highest_sack_replace(sk, next_skb, skb);
tcp_unlink_write_queue(next_skb, sk);
@@ -3017,13 +3018,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
tcp_ecn_make_synack(req, th);
th->source = htons(ireq->ir_num);
th->dest = ireq->ir_rmt_port;
- /* Setting of flags are superfluous here for callers (and ECE is
- * not even correctly set)
- */
- tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
- TCPHDR_SYN | TCPHDR_ACK);
-
- th->seq = htonl(TCP_SKB_CB(skb)->seq);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ th->seq = htonl(tcp_rsk(req)->snt_isn);
/* XXX data is queued and acked as is. No buffer/window check */
th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index dc2db4f7b182..f3a0a9c0f61e 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
}
opt_space->dst1opt = fopt->dst1opt;
opt_space->opt_flen = fopt->opt_flen;
+ opt_space->tot_len = fopt->tot_len;
return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6150a038711b..9d1a54de33f2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -409,13 +409,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case ICMPV6_DEST_UNREACH:
net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
t->parms.name);
- break;
+ if (code != ICMPV6_PORT_UNREACH)
+ break;
+ return;
case ICMPV6_TIME_EXCEED:
if (code == ICMPV6_EXC_HOPLIMIT) {
net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
t->parms.name);
+ break;
}
- break;
+ return;
case ICMPV6_PARAMPROB:
teli = 0;
if (code == ICMPV6_HDR_FIELD)
@@ -431,13 +434,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
t->parms.name);
}
- break;
+ return;
case ICMPV6_PKT_TOOBIG:
mtu = be32_to_cpu(info) - offset;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
- break;
+ return;
}
if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index e22339fad10b..71624cf26832 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1201,11 +1201,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
if (WARN_ON(v6_cork->opt))
return -EINVAL;
- v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+ v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
if (unlikely(!v6_cork->opt))
return -ENOBUFS;
- v6_cork->opt->tot_len = opt->tot_len;
+ v6_cork->opt->tot_len = sizeof(*opt);
v6_cork->opt->opt_flen = opt->opt_flen;
v6_cork->opt->opt_nflen = opt->opt_nflen;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index f615f982961a..1831fb108ad1 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
int err;
+ dev->rtnl_link_ops = &vti6_link_ops;
err = register_netdevice(dev);
if (err < 0)
goto out;
strcpy(t->parms.name, dev->name);
- dev->rtnl_link_ops = &vti6_link_ops;
dev_hold(dev);
vti6_tnl_link(ip6n, t);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4449ad1f8114..a4a30d2ca66f 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -583,16 +583,24 @@ done:
if (val) {
struct net_device *dev;
+ int midx;
- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
- goto e_inval;
+ rcu_read_lock();
- dev = dev_get_by_index(net, val);
+ dev = dev_get_by_index_rcu(net, val);
if (!dev) {
+ rcu_read_unlock();
retv = -ENODEV;
break;
}
- dev_put(dev);
+ midx = l3mdev_master_ifindex_rcu(dev);
+
+ rcu_read_unlock();
+
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != val &&
+ (!midx || midx != sk->sk_bound_dev_if))
+ goto e_inval;
}
np->mcast_oif = val;
retv = 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5c710f78163e..e367ce026db3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3380,7 +3380,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
- } else if (event == NETDEV_UNREGISTER) {
+ } else if (event == NETDEV_UNREGISTER &&
+ dev->reg_state != NETREG_UNREGISTERED) {
+ /* NETDEV_UNREGISTER could be fired for multiple times by
+ * netdev_wait_allrefs(). Make sure we only call this once.
+ */
in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8ab9c5d74416..67f2e72723b2 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1015,6 +1015,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
session->name, cmd, arg);
sk = ps->sock;
+ if (!sk)
+ return -EBADR;
+
sock_hold(sk);
switch (cmd) {
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 67fede656ea5..424aca76a192 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -682,7 +682,6 @@ struct ieee80211_if_mesh {
const struct ieee80211_mesh_sync_ops *sync_ops;
s64 sync_offset_clockdrift_max;
spinlock_t sync_offset_lock;
- bool adjusting_tbtt;
/* mesh power save */
enum nl80211_mesh_power_mode nonpeer_pm;
int ps_peers_light_sleep;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 44388d6a1d8e..4a72c0d1e56f 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -4,6 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
+#include <crypto/algapi.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -606,6 +608,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
ieee80211_key_free_common(key);
}
+static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_key *old,
+ struct ieee80211_key *new)
+{
+ u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
+ u8 *tk_old, *tk_new;
+
+ if (!old || new->conf.keylen != old->conf.keylen)
+ return false;
+
+ tk_old = old->conf.key;
+ tk_new = new->conf.key;
+
+ /*
+ * In station mode, don't compare the TX MIC key, as it's never used
+ * and offloaded rekeying may not care to send it to the host. This
+ * is the case in iwlwifi, for example.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
+ new->conf.keylen == WLAN_KEY_LEN_TKIP &&
+ !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
+ memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
+ memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+ memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+ tk_old = tkip_old;
+ tk_new = tkip_new;
+ }
+
+ return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
+}
+
int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
@@ -617,9 +652,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
idx = key->conf.keyidx;
- key->local = sdata->local;
- key->sdata = sdata;
- key->sta = sta;
mutex_lock(&sdata->local->key_mtx);
@@ -630,6 +662,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
else
old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ /*
+ * Silently accept key re-installation without really installing the
+ * new version of the key to avoid nonce reuse or replay issues.
+ */
+ if (ieee80211_key_identical(sdata, old_key, key)) {
+ ieee80211_key_free_unused(key);
+ ret = 0;
+ goto out;
+ }
+
+ key->local = sdata->local;
+ key->sdata = sdata;
+ key->sta = sta;
+
increment_tailroom_need_count(sdata);
ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -645,6 +691,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
ret = 0;
}
+ out:
mutex_unlock(&sdata->local->key_mtx);
return ret;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 9063e8e736ad..9e1ded80a992 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -295,8 +295,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
*pos |= ifmsh->ps_peers_deep_sleep ?
IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
- *pos++ |= ifmsh->adjusting_tbtt ?
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
*pos++ = 0x00;
return 0;
@@ -866,7 +864,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
ifmsh->mesh_cc_id = 0; /* Disabled */
/* register sync ops from extensible synchronization framework */
ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
- ifmsh->adjusting_tbtt = false;
ifmsh->sync_offset_clockdrift_max = 0;
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index bd3d55eb21d4..9f02e54ad2a5 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -495,12 +495,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
/* Userspace handles station allocation */
if (sdata->u.mesh.user_mpm ||
- sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
- cfg80211_notify_new_peer_candidate(sdata->dev, addr,
- elems->ie_start,
- elems->total_len,
- GFP_KERNEL);
- else
+ sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+ if (mesh_peer_accepts_plinks(elems) &&
+ mesh_plink_availables(sdata))
+ cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+ elems->ie_start,
+ elems->total_len,
+ GFP_KERNEL);
+ } else
sta = __mesh_sta_info_alloc(sdata, addr);
return sta;
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 64bc22ad9496..16ed43fe4841 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -119,7 +119,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
*/
if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
- clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
sta->sta.addr);
goto no_sync;
@@ -168,11 +167,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
struct beacon_data *beacon)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- u8 cap;
WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
WARN_ON(!rcu_read_lock_held());
- cap = beacon->meshconf->meshconf_cap;
spin_lock_bh(&ifmsh->sync_offset_lock);
@@ -186,21 +183,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
"TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
ifmsh->sync_offset_clockdrift_max);
set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
-
- ifmsh->adjusting_tbtt = true;
} else {
msync_dbg(sdata,
"TBTT : max clockdrift=%lld; too small to adjust\n",
(long long)ifmsh->sync_offset_clockdrift_max);
ifmsh->sync_offset_clockdrift_max = 0;
-
- ifmsh->adjusting_tbtt = false;
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
-
- beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
- ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
}
static const struct sync_method sync_methods[] = {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2cb429d34c03..120e9ae04db3 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1996,7 +1996,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
* is called on error from nf_tables_newrule().
*/
expr = nft_expr_first(rule);
- while (expr->ops && expr != nft_expr_last(rule)) {
+ while (expr != nft_expr_last(rule) && expr->ops) {
nf_tables_expr_destroy(ctx, expr);
expr = nft_expr_next(expr);
}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 9dfaf4d55ee0..a97a5bf716be 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -151,8 +151,34 @@ void nft_meta_get_eval(const struct nft_expr *expr,
else
*dest = PACKET_BROADCAST;
break;
+ case NFPROTO_NETDEV:
+ switch (skb->protocol) {
+ case htons(ETH_P_IP): {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph, _iph;
+
+ iph = skb_header_pointer(skb, noff,
+ sizeof(_iph), &_iph);
+ if (!iph)
+ goto err;
+
+ if (ipv4_is_multicast(iph->daddr))
+ *dest = PACKET_MULTICAST;
+ else
+ *dest = PACKET_BROADCAST;
+
+ break;
+ }
+ case htons(ETH_P_IPV6):
+ *dest = PACKET_MULTICAST;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ goto err;
+ }
+ break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
goto err;
}
break;
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 61d216eb7917..5d189c11d208 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -37,7 +37,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
if (priv->queues_total > 1) {
if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
- int cpu = smp_processor_id();
+ int cpu = raw_smp_processor_id();
queue = priv->queuenum + cpu % priv->queues_total;
} else {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2141d047301d..862e088905cc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2057,7 +2057,7 @@ static int netlink_dump(struct sock *sk)
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
struct module *module;
- int len, err = -ENOBUFS;
+ int err = -ENOBUFS;
int alloc_min_size;
int alloc_size;
@@ -2105,9 +2105,11 @@ static int netlink_dump(struct sock *sk)
skb_reserve(skb, skb_tailroom(skb) - alloc_size);
netlink_skb_set_owner_r(skb, sk);
- len = cb->dump(skb, cb);
+ if (nlk->dump_done_errno > 0)
+ nlk->dump_done_errno = cb->dump(skb, cb);
- if (len > 0) {
+ if (nlk->dump_done_errno > 0 ||
+ skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
mutex_unlock(nlk->cb_mutex);
if (sk_filter(sk, skb))
@@ -2117,13 +2119,15 @@ static int netlink_dump(struct sock *sk)
return 0;
}
- nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
- if (!nlh)
+ nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
+ sizeof(nlk->dump_done_errno), NLM_F_MULTI);
+ if (WARN_ON(!nlh))
goto errout_skb;
nl_dump_check_consistent(cb, nlh);
- memcpy(nlmsg_data(nlh), &len, sizeof(len));
+ memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
+ sizeof(nlk->dump_done_errno));
if (sk_filter(sk, skb))
kfree_skb(skb);
@@ -2179,6 +2183,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb = &nlk->cb;
memset(cb, 0, sizeof(*cb));
+ cb->start = control->start;
cb->dump = control->dump;
cb->done = control->done;
cb->nlh = nlh;
@@ -2188,9 +2193,13 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->skb = skb;
nlk->cb_running = true;
+ nlk->dump_done_errno = INT_MAX;
mutex_unlock(nlk->cb_mutex);
+ if (cb->start)
+ cb->start(cb);
+
ret = netlink_dump(sk);
sock_put(sk);
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 15e62973cfc6..4de7e97d8fb2 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -38,6 +38,7 @@ struct netlink_sock {
wait_queue_head_t wait;
bool bound;
bool cb_running;
+ int dump_done_errno;
struct netlink_callback cb;
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 80649934cf3b..b2cde0e09809 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -513,6 +513,20 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
}
EXPORT_SYMBOL(genlmsg_put);
+static int genl_lock_start(struct netlink_callback *cb)
+{
+ /* our ops are always const - netlink API doesn't propagate that */
+ const struct genl_ops *ops = cb->data;
+ int rc = 0;
+
+ if (ops->start) {
+ genl_lock();
+ rc = ops->start(cb);
+ genl_unlock();
+ }
+ return rc;
+}
+
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
/* our ops are always const - netlink API doesn't propagate that */
@@ -577,6 +591,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
.module = family->module,
/* we have const, but the netlink API doesn't */
.data = (void *)ops,
+ .start = genl_lock_start,
.dump = genl_lock_dumpit,
.done = genl_lock_done,
};
@@ -588,6 +603,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
} else {
struct netlink_dump_control c = {
.module = family->module,
+ .start = ops->start,
.dump = ops->dumpit,
.done = ops->done,
};
diff --git a/net/nfc/core.c b/net/nfc/core.c
index c5a2c7e733b3..1471e4b0aa2c 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1093,7 +1093,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
err_free_dev:
kfree(dev);
- return ERR_PTR(rc);
+ return NULL;
}
EXPORT_SYMBOL(nfc_allocate_device);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 241f69039a72..1584f89c456a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1724,7 +1724,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
out:
if (err && rollover) {
- kfree(rollover);
+ kfree_rcu(rollover, rcu);
po->rollover = NULL;
}
mutex_unlock(&fanout_mutex);
@@ -1751,8 +1751,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
else
f = NULL;
- if (po->rollover)
+ if (po->rollover) {
kfree_rcu(po->rollover, rcu);
+ po->rollover = NULL;
+ }
}
mutex_unlock(&fanout_mutex);
@@ -3769,6 +3771,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
void *data = &val;
union tpacket_stats_u st;
struct tpacket_rollover_stats rstats;
+ struct packet_rollover *rollover;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
@@ -3847,13 +3850,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
0);
break;
case PACKET_ROLLOVER_STATS:
- if (!po->rollover)
+ rcu_read_lock();
+ rollover = rcu_dereference(po->rollover);
+ if (rollover) {
+ rstats.tp_all = atomic_long_read(&rollover->num);
+ rstats.tp_huge = atomic_long_read(&rollover->num_huge);
+ rstats.tp_failed = atomic_long_read(&rollover->num_failed);
+ data = &rstats;
+ lv = sizeof(rstats);
+ }
+ rcu_read_unlock();
+ if (!rollover)
return -EINVAL;
- rstats.tp_all = atomic_long_read(&po->rollover->num);
- rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
- rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
- data = &rstats;
- lv = sizeof(rstats);
break;
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
diff --git a/net/rds/send.c b/net/rds/send.c
index 6815f03324d7..1a3c6acdd3f8 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -959,6 +959,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
ret = rds_cmsg_rdma_map(rs, rm, cmsg);
if (!ret)
*allocated_mr = 1;
+ else if (ret == -ENODEV)
+ /* Accommodate the get_mr() case which can fail
+ * if connection isn't established yet.
+ */
+ ret = -EAGAIN;
break;
case RDS_CMSG_ATOMIC_CSWP:
case RDS_CMSG_ATOMIC_FADD:
@@ -1072,8 +1077,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* Parse any control messages the user may have included. */
ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
- if (ret)
+ if (ret) {
+ /* Trigger connection so that its ready for the next retry */
+ if (ret == -EAGAIN)
+ rds_conn_connect_if_down(conn);
goto out;
+ }
if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 95d7b15dad21..e371a0d90068 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = {
/* Lookup timer debug name. */
const char *sctp_tname(const sctp_subtype_t id)
{
- if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
+ if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
return sctp_timer_tbl[id.timeout];
return "unknown_timer";
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2d7859c03fd2..71c2ef84c5b0 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -420,7 +420,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
{
struct dst_entry *dst;
- if (!t)
+ if (sock_owned_by_user(sk) || !t)
return;
dst = sctp_transport_dst_check(t);
if (dst)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e33e9bd4ed5a..8a61ccc37e12 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -806,6 +806,8 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
struct sctp_ulpevent *ev = sctp_skb2event(skb);
addr->v6.sin6_scope_id = ev->iif;
+ } else {
+ addr->v6.sin6_scope_id = 0;
}
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3ebf3b652d60..7f0f689b8d2b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -168,6 +168,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
sk_mem_charge(sk, chunk->skb->truesize);
}
+static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+{
+ skb_orphan(chunk->skb);
+}
+
+static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+ void (*cb)(struct sctp_chunk *))
+
+{
+ struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_transport *t;
+ struct sctp_chunk *chunk;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+ list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->retransmit, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->sacked, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->abandoned, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->out_chunk_list, list)
+ cb(chunk);
+}
+
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
@@ -4423,6 +4453,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
struct socket *sock;
int err = 0;
+ /* Do not peel off from one netns to another one. */
+ if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
+ return -EINVAL;
+
if (!asoc)
return -EINVAL;
@@ -7362,7 +7396,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+ sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
sctp_assoc_migrate(assoc, newsk);
+ sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 72268eac4ec7..736fffb28ab6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1084,25 +1084,6 @@ drop:
return rc;
}
-/*
- * Send protocol message to the other endpoint.
- */
-void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
- u32 gap, u32 tolerance, u32 priority)
-{
- struct sk_buff *skb = NULL;
- struct sk_buff_head xmitq;
-
- __skb_queue_head_init(&xmitq);
- tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
- tolerance, priority, &xmitq);
- skb = __skb_dequeue(&xmitq);
- if (!skb)
- return;
- tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
- l->rcv_unacked = 0;
-}
-
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
u16 rcvgap, int tolerance, int priority,
struct sk_buff_head *xmitq)
@@ -1636,9 +1617,12 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
char *name;
struct tipc_link *link;
struct tipc_node *node;
+ struct sk_buff_head xmitq;
struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
struct net *net = sock_net(skb->sk);
+ __skb_queue_head_init(&xmitq);
+
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
@@ -1683,14 +1667,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
link->tolerance = tol;
- tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
+ tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, tol, 0, &xmitq);
}
if (props[TIPC_NLA_PROP_PRIO]) {
u32 prio;
prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
link->priority = prio;
- tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
+ tipc_link_build_proto_msg(link, STATE_MSG, 0, 0, 0, prio, &xmitq);
}
if (props[TIPC_NLA_PROP_WIN]) {
u32 win;
@@ -1702,7 +1686,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
out:
tipc_node_unlock(node);
-
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
return res;
}
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 66d859b66c84..2a0d58671e88 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -153,7 +153,6 @@ struct tipc_stats {
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
- struct tipc_media_addr *media_addr;
struct net *net;
/* Management and link supervision data */
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 50f5b0ca7b3c..c416e5184a3f 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -618,14 +618,12 @@ int tipc_server_start(struct tipc_server *s)
void tipc_server_stop(struct tipc_server *s)
{
struct tipc_conn *con;
- int total = 0;
int id;
spin_lock_bh(&s->idr_lock);
- for (id = 0; total < s->idr_in_use; id++) {
+ for (id = 0; s->idr_in_use; id++) {
con = idr_find(&s->conn_idr, id);
if (con) {
- total++;
spin_unlock_bh(&s->idr_lock);
tipc_close_conn(con);
spin_lock_bh(&s->idr_lock);
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 4d9679701a6d..384c84e83462 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
+ if (!net_eq(sock_net(sk), net))
+ goto out;
err = sock_diag_check_cookie(sk, req->udiag_cookie);
if (err)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9b5bd6d142dc..60324f7c72bd 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1209,10 +1209,14 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
- goto out_wait_error;
+ sk->sk_state = SS_UNCONNECTED;
+ sock->state = SS_UNCONNECTED;
+ goto out_wait;
} else if (timeout == 0) {
err = -ETIMEDOUT;
- goto out_wait_error;
+ sk->sk_state = SS_UNCONNECTED;
+ sock->state = SS_UNCONNECTED;
+ goto out_wait;
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
@@ -1220,20 +1224,17 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
if (sk->sk_err) {
err = -sk->sk_err;
- goto out_wait_error;
- } else
+ sk->sk_state = SS_UNCONNECTED;
+ sock->state = SS_UNCONNECTED;
+ } else {
err = 0;
+ }
out_wait:
finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
-
-out_wait_error:
- sk->sk_state = SS_UNCONNECTED;
- sock->state = SS_UNCONNECTED;
- goto out_wait;
}
static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
@@ -1270,18 +1271,20 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
listener->sk_err == 0) {
release_sock(listener);
timeout = schedule_timeout(timeout);
+ finish_wait(sk_sleep(listener), &wait);
lock_sock(listener);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
- goto out_wait;
+ goto out;
} else if (timeout == 0) {
err = -EAGAIN;
- goto out_wait;
+ goto out;
}
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
}
+ finish_wait(sk_sleep(listener), &wait);
if (listener->sk_err)
err = -listener->sk_err;
@@ -1301,19 +1304,15 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
*/
if (err) {
vconnected->rejected = true;
- release_sock(connected);
- sock_put(connected);
- goto out_wait;
+ } else {
+ newsock->state = SS_CONNECTED;
+ sock_graft(connected, newsock);
}
- newsock->state = SS_CONNECTED;
- sock_graft(connected, newsock);
release_sock(connected);
sock_put(connected);
}
-out_wait:
- finish_wait(sk_sleep(listener), &wait);
out:
release_sock(listener);
return err;
@@ -1513,8 +1512,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
long timeout;
int err;
struct vsock_transport_send_notify_data send_data;
-
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
sk = sock->sk;
vsk = vsock_sk(sk);
@@ -1557,11 +1555,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
if (err < 0)
goto out;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-
while (total_written < len) {
ssize_t written;
+ add_wait_queue(sk_sleep(sk), &wait);
while (vsock_stream_has_space(vsk) == 0 &&
sk->sk_err == 0 &&
!(sk->sk_shutdown & SEND_SHUTDOWN) &&
@@ -1570,27 +1567,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
/* Don't wait for non-blocking sockets. */
if (timeout == 0) {
err = -EAGAIN;
- goto out_wait;
+ remove_wait_queue(sk_sleep(sk), &wait);
+ goto out_err;
}
err = transport->notify_send_pre_block(vsk, &send_data);
- if (err < 0)
- goto out_wait;
+ if (err < 0) {
+ remove_wait_queue(sk_sleep(sk), &wait);
+ goto out_err;
+ }
release_sock(sk);
- timeout = schedule_timeout(timeout);
+ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
- goto out_wait;
+ remove_wait_queue(sk_sleep(sk), &wait);
+ goto out_err;
} else if (timeout == 0) {
err = -EAGAIN;
- goto out_wait;
+ remove_wait_queue(sk_sleep(sk), &wait);
+ goto out_err;
}
-
- prepare_to_wait(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
}
+ remove_wait_queue(sk_sleep(sk), &wait);
/* These checks occur both as part of and after the loop
* conditional since we need to check before and after
@@ -1598,16 +1598,16 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
*/
if (sk->sk_err) {
err = -sk->sk_err;
- goto out_wait;
+ goto out_err;
} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
(vsk->peer_shutdown & RCV_SHUTDOWN)) {
err = -EPIPE;
- goto out_wait;
+ goto out_err;
}
err = transport->notify_send_pre_enqueue(vsk, &send_data);
if (err < 0)
- goto out_wait;
+ goto out_err;
/* Note that enqueue will only write as many bytes as are free
* in the produce queue, so we don't need to ensure len is
@@ -1620,7 +1620,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
len - total_written);
if (written < 0) {
err = -ENOMEM;
- goto out_wait;
+ goto out_err;
}
total_written += written;
@@ -1628,14 +1628,13 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
err = transport->notify_send_post_enqueue(
vsk, written, &send_data);
if (err < 0)
- goto out_wait;
+ goto out_err;
}
-out_wait:
+out_err:
if (total_written > 0)
err = total_written;
- finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
@@ -1716,21 +1715,61 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (err < 0)
goto out;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (1) {
- s64 ready = vsock_stream_has_data(vsk);
+ s64 ready;
- if (ready < 0) {
- /* Invalid queue pair content. XXX This should be
- * changed to a connection reset in a later change.
- */
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ ready = vsock_stream_has_data(vsk);
- err = -ENOMEM;
- goto out_wait;
- } else if (ready > 0) {
+ if (ready == 0) {
+ if (sk->sk_err != 0 ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ (vsk->peer_shutdown & SEND_SHUTDOWN)) {
+ finish_wait(sk_sleep(sk), &wait);
+ break;
+ }
+ /* Don't wait for non-blocking sockets. */
+ if (timeout == 0) {
+ err = -EAGAIN;
+ finish_wait(sk_sleep(sk), &wait);
+ break;
+ }
+
+ err = transport->notify_recv_pre_block(
+ vsk, target, &recv_data);
+ if (err < 0) {
+ finish_wait(sk_sleep(sk), &wait);
+ break;
+ }
+ release_sock(sk);
+ timeout = schedule_timeout(timeout);
+ lock_sock(sk);
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeout);
+ finish_wait(sk_sleep(sk), &wait);
+ break;
+ } else if (timeout == 0) {
+ err = -EAGAIN;
+ finish_wait(sk_sleep(sk), &wait);
+ break;
+ }
+ } else {
ssize_t read;
+ finish_wait(sk_sleep(sk), &wait);
+
+ if (ready < 0) {
+ /* Invalid queue pair content. XXX This should
+ * be changed to a connection reset in a later
+ * change.
+ */
+
+ err = -ENOMEM;
+ goto out;
+ }
+
err = transport->notify_recv_pre_dequeue(
vsk, target, &recv_data);
if (err < 0)
@@ -1750,42 +1789,12 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
vsk, target, read,
!(flags & MSG_PEEK), &recv_data);
if (err < 0)
- goto out_wait;
+ goto out;
if (read >= target || flags & MSG_PEEK)
break;
target -= read;
- } else {
- if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
- || (vsk->peer_shutdown & SEND_SHUTDOWN)) {
- break;
- }
- /* Don't wait for non-blocking sockets. */
- if (timeout == 0) {
- err = -EAGAIN;
- break;
- }
-
- err = transport->notify_recv_pre_block(
- vsk, target, &recv_data);
- if (err < 0)
- break;
-
- release_sock(sk);
- timeout = schedule_timeout(timeout);
- lock_sock(sk);
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeout);
- break;
- } else if (timeout == 0) {
- err = -EAGAIN;
- break;
- }
-
- prepare_to_wait(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
}
}
@@ -1797,8 +1806,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (copied > 0)
err = copied;
-out_wait:
- finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4096f699ba00..5b3e5f54c79e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1307,7 +1307,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
- struct net *net = xp_net(pol);
+ struct net *net = sock_net(sk);
struct xfrm_policy *old_pol;
#ifdef CONFIG_XFRM_SUB_POLICY
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9895a8c56d8c..7944daeb7378 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1845,6 +1845,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
+ if (!optval && !optlen) {
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
+ __sk_dst_reset(sk);
+ return 0;
+ }
+
if (optlen <= 0 || optlen > PAGE_SIZE)
return -EMSGSIZE;
@@ -1869,6 +1876,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
if (err >= 0) {
xfrm_sk_policy_insert(sk, err, pol);
xfrm_pol_put(pol);
+ __sk_dst_reset(sk);
err = 0;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 68010a01ea36..8b71b09e5ab6 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1660,32 +1660,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
}
+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+ BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+ xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+ return 0;
+}
+
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct xfrm_dump_info info;
- BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
- sizeof(cb->args) - sizeof(cb->args[0]));
-
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
- if (!cb->args[0]) {
- cb->args[0] = 1;
- xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
- }
-
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
@@ -2437,6 +2439,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
@@ -2450,6 +2453,7 @@ static const struct xfrm_link {
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+ .start = xfrm_dump_policy_start,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2501,6 +2505,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct netlink_dump_control c = {
+ .start = link->start,
.dump = link->dump,
.done = link->done,
};
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 880a7d1d27d2..4ccff66523c9 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,28 +78,36 @@ static int simple_thread_fn(void *arg)
}
static DEFINE_MUTEX(thread_mutex);
+static int simple_thread_cnt;
void foo_bar_reg(void)
{
+ mutex_lock(&thread_mutex);
+ if (simple_thread_cnt++)
+ goto out;
+
pr_info("Starting thread for foo_bar_fn\n");
/*
* We shouldn't be able to start a trace when the module is
* unloading (there's other locks to prevent that). But
* for consistency sake, we still take the thread_mutex.
*/
- mutex_lock(&thread_mutex);
simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ out:
mutex_unlock(&thread_mutex);
}
void foo_bar_unreg(void)
{
- pr_info("Killing thread for foo_bar_fn\n");
- /* protect against module unloading */
mutex_lock(&thread_mutex);
+ if (--simple_thread_cnt)
+ goto out;
+
+ pr_info("Killing thread for foo_bar_fn\n");
if (simple_tsk_fn)
kthread_stop(simple_tsk_fn);
simple_tsk_fn = NULL;
+ out:
mutex_unlock(&thread_mutex);
}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 9ce9d5003dcc..19014293f927 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -297,6 +297,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
if (iint->flags & IMA_DIGSIG)
return;
+ if (iint->ima_file_status != INTEGRITY_PASS)
+ return;
+
rc = ima_collect_measurement(iint, file, NULL, NULL);
if (rc < 0)
return;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index c21f09bf8b99..98289ba2a2e6 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -52,6 +52,8 @@ static int __init hash_setup(char *str)
ima_hash_algo = HASH_ALGO_SHA1;
else if (strncmp(str, "md5", 3) == 0)
ima_hash_algo = HASH_ALGO_MD5;
+ else
+ return 1;
goto out;
}
@@ -61,6 +63,8 @@ static int __init hash_setup(char *str)
break;
}
}
+ if (i == HASH_ALGO__LAST)
+ return 1;
out:
hash_setup_done = 1;
return 1;
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 72483b8f1be5..1edb37eea81d 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -20,6 +20,10 @@ config KEYS
If you are unsure as to whether this is required, answer N.
+config KEYS_COMPAT
+ def_bool y
+ depends on COMPAT && KEYS
+
config PERSISTENT_KEYRINGS
bool "Enable register of persistent per-UID keyrings"
depends on KEYS
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index ef828238cdc0..d5264f950ce1 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -452,34 +452,33 @@ static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
struct keyring_read_iterator_context ctx;
- unsigned long nr_keys;
- int ret;
+ long ret;
kenter("{%d},,%zu", key_serial(keyring), buflen);
if (buflen & (sizeof(key_serial_t) - 1))
return -EINVAL;
- nr_keys = keyring->keys.nr_leaves_on_tree;
- if (nr_keys == 0)
- return 0;
-
- /* Calculate how much data we could return */
- if (!buffer || !buflen)
- return nr_keys * sizeof(key_serial_t);
-
- /* Copy the IDs of the subscribed keys into the buffer */
- ctx.buffer = (key_serial_t __user *)buffer;
- ctx.buflen = buflen;
- ctx.count = 0;
- ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
- if (ret < 0) {
- kleave(" = %d [iterate]", ret);
- return ret;
+ /* Copy as many key IDs as fit into the buffer */
+ if (buffer && buflen) {
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.buflen = buflen;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys,
+ keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %ld [iterate]", ret);
+ return ret;
+ }
}
- kleave(" = %zu [ok]", ctx.count);
- return ctx.count;
+ /* Return the size of the buffer needed */
+ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
+ if (ret <= buflen)
+ kleave("= %ld [ok]", ret);
+ else
+ kleave("= %ld [buffer too small]", ret);
+ return ret;
}
/*
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 509aedcf8310..214ae2dc7f64 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -69,7 +69,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
}
ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -113,7 +113,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
if (!ret)
ret = crypto_shash_final(&sdesc->shash, digest);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -164,7 +164,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
paramdigest, TPM_NONCE_SIZE, h1,
TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -245,7 +245,7 @@ static int TSS_checkhmac1(unsigned char *buffer,
if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -346,7 +346,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -563,7 +563,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
*bloblen = storedsize;
}
out:
- kfree(td);
+ kzfree(td);
return ret;
}
@@ -677,7 +677,7 @@ static int key_seal(struct trusted_key_payload *p,
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -702,7 +702,7 @@ static int key_unseal(struct trusted_key_payload *p,
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -984,12 +984,12 @@ static int trusted_instantiate(struct key *key,
if (!ret && options->pcrlock)
ret = pcrlock(options->pcrlock);
out:
- kfree(datablob);
- kfree(options);
+ kzfree(datablob);
+ kzfree(options);
if (!ret)
rcu_assign_keypointer(key, payload);
else
- kfree(payload);
+ kzfree(payload);
return ret;
}
@@ -998,8 +998,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
struct trusted_key_payload *p;
p = container_of(rcu, struct trusted_key_payload, rcu);
- memset(p->key, 0, p->key_len);
- kfree(p);
+ kzfree(p);
}
/*
@@ -1041,13 +1040,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (!new_o->keyhandle) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
@@ -1061,22 +1060,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
ret = key_seal(new_p, new_o);
if (ret < 0) {
pr_info("trusted_key: key_seal failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (new_o->pcrlock) {
ret = pcrlock(new_o->pcrlock);
if (ret < 0) {
pr_info("trusted_key: pcrlock failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
}
rcu_assign_keypointer(key, new_p);
call_rcu(&p->rcu, trusted_rcu_free);
out:
- kfree(datablob);
- kfree(new_o);
+ kzfree(datablob);
+ kzfree(new_o);
return ret;
}
@@ -1095,34 +1094,30 @@ static long trusted_read(const struct key *key, char __user *buffer,
p = rcu_dereference_key(key);
if (!p)
return -EINVAL;
- if (!buffer || buflen <= 0)
- return 2 * p->blob_len;
- ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
- if (!ascii_buf)
- return -ENOMEM;
- bufp = ascii_buf;
- for (i = 0; i < p->blob_len; i++)
- bufp = hex_byte_pack(bufp, p->blob[i]);
- if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
- kfree(ascii_buf);
- return -EFAULT;
+ if (buffer && buflen >= 2 * p->blob_len) {
+ ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+ if (!ascii_buf)
+ return -ENOMEM;
+
+ bufp = ascii_buf;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
+ kzfree(ascii_buf);
+ return -EFAULT;
+ }
+ kzfree(ascii_buf);
}
- kfree(ascii_buf);
return 2 * p->blob_len;
}
/*
- * trusted_destroy - before freeing the key, clear the decrypted data
+ * trusted_destroy - clear and free the key's payload
*/
static void trusted_destroy(struct key *key)
{
- struct trusted_key_payload *p = key->payload.data[0];
-
- if (!p)
- return;
- memset(p->key, 0, p->key_len);
- kfree(key->payload.data[0]);
+ kzfree(key->payload.data[0]);
}
struct key_type key_type_trusted = {
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index a2c2f06060df..4fc68b126169 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -742,6 +742,7 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
}
substream->group = &substream->self_group;
spin_lock_init(&substream->self_group.lock);
+ spin_lock_init(&substream->runtime_lock);
mutex_init(&substream->self_group.mutex);
INIT_LIST_HEAD(&substream->self_group.substreams);
list_add_tail(&substream->link_list, &substream->self_group.substreams);
@@ -1020,9 +1021,11 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
+ unsigned long flags = 0;
if (PCM_RUNTIME_CHECK(substream))
return;
+ spin_lock_irqsave(&substream->runtime_lock, flags);
runtime = substream->runtime;
if (runtime->private_free != NULL)
runtime->private_free(runtime);
@@ -1036,6 +1039,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
put_pid(substream->pid);
substream->pid = NULL;
substream->pstr->substream_opened--;
+ spin_unlock_irqrestore(&substream->runtime_lock, flags);
}
static ssize_t show_pcm_class(struct device *dev,
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 2530669e2f94..db2c1cdd93b7 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -267,8 +267,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
runtime->rate);
*audio_tstamp = ns_to_timespec(audio_nsecs);
}
- runtime->status->audio_tstamp = *audio_tstamp;
- runtime->status->tstamp = *curr_tstamp;
+ if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
+ runtime->status->audio_tstamp = *audio_tstamp;
+ runtime->status->tstamp = *curr_tstamp;
+ }
/*
* re-take a driver timestamp to let apps detect if the reference tstamp
@@ -1857,8 +1859,6 @@ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
switch (cmd) {
- case SNDRV_PCM_IOCTL1_INFO:
- return 0;
case SNDRV_PCM_IOCTL1_RESET:
return snd_pcm_lib_ioctl_reset(substream, arg);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 9af294c72a4d..51110252e3f3 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -197,7 +197,6 @@ static inline void snd_leave_user(mm_segment_t fs)
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
- struct snd_pcm_runtime *runtime;
struct snd_pcm *pcm = substream->pcm;
struct snd_pcm_str *pstr = substream->pstr;
@@ -213,12 +212,7 @@ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
info->subdevices_count = pstr->substream_count;
info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
strlcpy(info->subname, substream->name, sizeof(info->subname));
- runtime = substream->runtime;
- /* AB: FIXME!!! This is definitely nonsense */
- if (runtime) {
- info->sync = runtime->sync;
- substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info);
- }
+
return 0;
}
diff --git a/sound/core/pcm_timer.c b/sound/core/pcm_timer.c
index 20ecd8f18080..11ea73f019ba 100644
--- a/sound/core/pcm_timer.c
+++ b/sound/core/pcm_timer.c
@@ -65,9 +65,16 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream)
static unsigned long snd_pcm_timer_resolution(struct snd_timer * timer)
{
struct snd_pcm_substream *substream;
-
+ unsigned long ret = 0, flags = 0;
+
substream = timer->private_data;
- return substream->runtime ? substream->runtime->timer_resolution : 0;
+ spin_lock_irqsave(&substream->runtime_lock, flags);
+ if (substream->runtime)
+ ret = substream->runtime->timer_resolution;
+ else
+ ret = 0;
+ spin_unlock_irqrestore(&substream->runtime_lock, flags);
+ return ret;
}
static int snd_pcm_timer_start(struct snd_timer * timer)
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index aaff9ee32695..b30b2139e3f0 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
if (!dp->timer->running)
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
- if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
- snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
- ev->data.ext.ptr, ev->data.ext.len);
+ snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)
diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
index 046cb586fb2f..06b21226b4e7 100644
--- a/sound/core/seq/oss/seq_oss_readq.c
+++ b/sound/core/seq/oss/seq_oss_readq.c
@@ -118,6 +118,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in
}
/*
+ * put MIDI sysex bytes; the event buffer may be chained, thus it has
+ * to be expanded via snd_seq_dump_var_event().
+ */
+struct readq_sysex_ctx {
+ struct seq_oss_readq *readq;
+ int dev;
+};
+
+static int readq_dump_sysex(void *ptr, void *buf, int count)
+{
+ struct readq_sysex_ctx *ctx = ptr;
+
+ return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
+}
+
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev)
+{
+ struct readq_sysex_ctx ctx = {
+ .readq = q,
+ .dev = dev
+ };
+
+ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+ return 0;
+ return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
+}
+
+/*
* copy an event to input queue:
* return zero if enqueued
*/
diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h
index f1463f1f449e..8d033ca2d23f 100644
--- a/sound/core/seq/oss/seq_oss_readq.h
+++ b/sound/core/seq/oss/seq_oss_readq.h
@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev);
int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index e847b9923c19..b36de76f24e2 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -676,7 +676,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
if (atomic)
read_lock(&grp->list_lock);
else
- down_read(&grp->list_mutex);
+ down_read_nested(&grp->list_mutex, hop);
list_for_each_entry(subs, &grp->list_head, src_list) {
/* both ports ready? */
if (atomic_read(&subs->ref_count) != 2)
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
index c4acf17e9f5e..e40a2cba5002 100644
--- a/sound/core/seq/seq_device.c
+++ b/sound/core/seq/seq_device.c
@@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void)
flush_work(&autoload_work);
}
EXPORT_SYMBOL(snd_seq_device_load_drivers);
+#define cancel_autoload_drivers() cancel_work_sync(&autoload_work)
#else
#define queue_autoload_drivers() /* NOP */
+#define cancel_autoload_drivers() /* NOP */
#endif
/*
@@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
+ cancel_autoload_drivers();
put_device(&dev->dev);
return 0;
}
diff --git a/sound/core/timer.c b/sound/core/timer.c
index f0675acecc93..0e51e5cd33fe 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -318,8 +318,6 @@ int snd_timer_open(struct snd_timer_instance **ti,
return 0;
}
-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
-
/*
* close a timer instance
*/
@@ -408,7 +406,6 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
struct snd_timer *timer;
- unsigned long flags;
unsigned long resolution = 0;
struct snd_timer_instance *ts;
struct timespec tstamp;
@@ -432,34 +429,66 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
- spin_lock_irqsave(&timer->lock, flags);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
ts->ccallback(ts, event + 100, &tstamp, resolution);
- spin_unlock_irqrestore(&timer->lock, flags);
}
-static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri,
- unsigned long sticks)
+/* start/continue a master timer */
+static int snd_timer_start1(struct snd_timer_instance *timeri,
+ bool start, unsigned long ticks)
{
+ struct snd_timer *timer;
+ int result;
+ unsigned long flags;
+
+ timer = timeri->timer;
+ if (!timer)
+ return -EINVAL;
+
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timer->card && timer->card->shutdown) {
+ result = -ENODEV;
+ goto unlock;
+ }
+ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START)) {
+ result = -EBUSY;
+ goto unlock;
+ }
+
+ if (start)
+ timeri->ticks = timeri->cticks = ticks;
+ else if (!timeri->cticks)
+ timeri->cticks = 1;
+ timeri->pticks = 0;
+
list_move_tail(&timeri->active_list, &timer->active_list_head);
if (timer->running) {
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
goto __start_now;
timer->flags |= SNDRV_TIMER_FLG_RESCHED;
timeri->flags |= SNDRV_TIMER_IFLG_START;
- return 1; /* delayed start */
+ result = 1; /* delayed start */
} else {
- timer->sticks = sticks;
+ if (start)
+ timer->sticks = ticks;
timer->hw.start(timer);
__start_now:
timer->running++;
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
- return 0;
+ result = 0;
}
+ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
+ SNDRV_TIMER_EVENT_CONTINUE);
+ unlock:
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return result;
}
-static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+/* start/continue a slave timer */
+static int snd_timer_start_slave(struct snd_timer_instance *timeri,
+ bool start)
{
unsigned long flags;
@@ -473,88 +502,37 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
+ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
+ SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
-/*
- * start the timer instance
- */
-int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+/* stop/pause a master timer */
+static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
{
struct snd_timer *timer;
- int result = -EINVAL;
+ int result = 0;
unsigned long flags;
- if (timeri == NULL || ticks < 1)
- return -EINVAL;
- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
- result = snd_timer_start_slave(timeri);
- if (result >= 0)
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
- return result;
- }
- timer = timeri->timer;
- if (timer == NULL)
- return -EINVAL;
- if (timer->card && timer->card->shutdown)
- return -ENODEV;
- spin_lock_irqsave(&timer->lock, flags);
- if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
- SNDRV_TIMER_IFLG_START)) {
- result = -EBUSY;
- goto unlock;
- }
- timeri->ticks = timeri->cticks = ticks;
- timeri->pticks = 0;
- result = snd_timer_start1(timer, timeri, ticks);
- unlock:
- spin_unlock_irqrestore(&timer->lock, flags);
- if (result >= 0)
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
- return result;
-}
-
-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
-{
- struct snd_timer *timer;
- unsigned long flags;
-
- if (snd_BUG_ON(!timeri))
- return -ENXIO;
-
- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
- spin_lock_irqsave(&slave_active_lock, flags);
- if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
- spin_unlock_irqrestore(&slave_active_lock, flags);
- return -EBUSY;
- }
- if (timeri->timer)
- spin_lock(&timeri->timer->lock);
- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
- list_del_init(&timeri->ack_list);
- list_del_init(&timeri->active_list);
- if (timeri->timer)
- spin_unlock(&timeri->timer->lock);
- spin_unlock_irqrestore(&slave_active_lock, flags);
- goto __end;
- }
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
SNDRV_TIMER_IFLG_START))) {
- spin_unlock_irqrestore(&timer->lock, flags);
- return -EBUSY;
+ result = -EBUSY;
+ goto unlock;
}
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
- if (timer->card && timer->card->shutdown) {
- spin_unlock_irqrestore(&timer->lock, flags);
- return 0;
+ if (timer->card && timer->card->shutdown)
+ goto unlock;
+ if (stop) {
+ timeri->cticks = timeri->ticks;
+ timeri->pticks = 0;
}
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
@@ -569,35 +547,60 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+ SNDRV_TIMER_EVENT_CONTINUE);
+ unlock:
spin_unlock_irqrestore(&timer->lock, flags);
- __end:
- if (event != SNDRV_TIMER_EVENT_RESOLUTION)
- snd_timer_notify1(timeri, event);
+ return result;
+}
+
+/* stop/pause a slave timer */
+static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&slave_active_lock, flags);
+ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
+ }
+ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ if (timeri->timer) {
+ spin_lock(&timeri->timer->lock);
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+ SNDRV_TIMER_EVENT_CONTINUE);
+ spin_unlock(&timeri->timer->lock);
+ }
+ spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
}
/*
+ * start the timer instance
+ */
+int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+{
+ if (timeri == NULL || ticks < 1)
+ return -EINVAL;
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+ return snd_timer_start_slave(timeri, true);
+ else
+ return snd_timer_start1(timeri, true, ticks);
+}
+
+/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
- struct snd_timer *timer;
- unsigned long flags;
- int err;
-
- err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
- if (err < 0)
- return err;
- timer = timeri->timer;
- if (!timer)
- return -EINVAL;
- spin_lock_irqsave(&timer->lock, flags);
- timeri->cticks = timeri->ticks;
- timeri->pticks = 0;
- spin_unlock_irqrestore(&timer->lock, flags);
- return 0;
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+ return snd_timer_stop_slave(timeri, true);
+ else
+ return snd_timer_stop1(timeri, true);
}
/*
@@ -605,32 +608,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
- struct snd_timer *timer;
- int result = -EINVAL;
- unsigned long flags;
-
- if (timeri == NULL)
- return result;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
- return snd_timer_start_slave(timeri);
- timer = timeri->timer;
- if (! timer)
- return -EINVAL;
- if (timer->card && timer->card->shutdown)
- return -ENODEV;
- spin_lock_irqsave(&timer->lock, flags);
- if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
- result = -EBUSY;
- goto unlock;
- }
- if (!timeri->cticks)
- timeri->cticks = 1;
- timeri->pticks = 0;
- result = snd_timer_start1(timer, timeri, timer->sticks);
- unlock:
- spin_unlock_irqrestore(&timer->lock, flags);
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
- return result;
+ return snd_timer_start_slave(timeri, false);
+ else
+ return snd_timer_start1(timeri, false, 0);
}
/*
@@ -638,7 +619,10 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
- return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+ return snd_timer_stop_slave(timeri, false);
+ else
+ return snd_timer_stop1(timeri, false);
}
/*
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 2e908225d754..de9155eed727 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -40,11 +40,11 @@ static int snd_timer_user_info_compat(struct file *file,
struct snd_timer *t;
tu = file->private_data;
- if (snd_BUG_ON(!tu->timeri))
- return -ENXIO;
+ if (!tu->timeri)
+ return -EBADFD;
t = tu->timeri->timer;
- if (snd_BUG_ON(!t))
- return -ENXIO;
+ if (!t)
+ return -EBADFD;
memset(&info, 0, sizeof(info));
info.card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
@@ -73,8 +73,8 @@ static int snd_timer_user_status_compat(struct file *file,
struct snd_timer_status32 status;
tu = file->private_data;
- if (snd_BUG_ON(!tu->timeri))
- return -ENXIO;
+ if (!tu->timeri)
+ return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp.tv_sec = tu->tstamp.tv_sec;
status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
@@ -106,7 +106,8 @@ enum {
#endif /* CONFIG_X86_X32 */
};
-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = compat_ptr(arg);
@@ -127,7 +128,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
- return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
case SNDRV_TIMER_IOCTL_INFO32:
return snd_timer_user_info_compat(file, argp);
case SNDRV_TIMER_IOCTL_STATUS32:
@@ -139,3 +140,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
}
return -ENOIOCTLCMD;
}
+
+static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct snd_timer_user *tu = file->private_data;
+ long ret;
+
+ mutex_lock(&tu->ioctl_lock);
+ ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
+ mutex_unlock(&tu->ioctl_lock);
+ return ret;
+}
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index 11467272089e..ea7b377f0378 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -1015,7 +1015,7 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream
int size, space, count;
struct snd_pcm_runtime *runtime = subs->runtime;
- if (! pipe->prepared || (chip->chip_status & VX_STAT_IS_STALE))
+ if (!pipe->running || (chip->chip_status & VX_STAT_IS_STALE))
return;
size = runtime->buffer_size - snd_pcm_capture_avail(runtime);
@@ -1048,8 +1048,10 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream
/* ok, let's accelerate! */
int align = pipe->align * 3;
space = (count / align) * align;
- vx_pseudo_dma_read(chip, runtime, pipe, space);
- count -= space;
+ if (space > 0) {
+ vx_pseudo_dma_read(chip, runtime, pipe, space);
+ count -= space;
+ }
}
/* read the rest of bytes */
while (count > 0) {
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e6de496bffbe..e2e08fc73b50 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2316,6 +2316,9 @@ static const struct pci_device_id azx_ids[] = {
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ /* AMD Raven */
+ { PCI_DEVICE(0x1022, 0x15e3),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5cab24f52825..e5730a7d0480 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
case 0x10ec0282:
@@ -337,6 +338,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0288:
case 0x10ec0295:
case 0x10ec0298:
+ case 0x10ec0299:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
case 0x10ec0285:
@@ -909,9 +911,11 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
{ 0x10ec0275, 0x1028, 0, "ALC3260" },
{ 0x10ec0899, 0x1028, 0, "ALC3861" },
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+ { 0x10ec0236, 0x1028, 0, "ALC3204" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
{ 0x10ec0225, 0x1028, 0, "ALC3253" },
{ 0x10ec0295, 0x1028, 0, "ALC3254" },
+ { 0x10ec0299, 0x1028, 0, "ALC3271" },
{ 0x10ec0670, 0x1025, 0, "ALC669X" },
{ 0x10ec0676, 0x1025, 0, "ALC679X" },
{ 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -3694,6 +3698,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0255_1);
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
@@ -3718,6 +3723,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0295:
+ case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
}
@@ -3774,6 +3780,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -3819,6 +3826,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
break;
case 0x10ec0225:
case 0x10ec0295:
+ case 0x10ec0299:
alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
alc_process_coef_fw(codec, coef0225);
@@ -3877,8 +3885,10 @@ static void alc_headset_mode_default(struct hda_codec *codec)
switch (codec->core.vendor_id) {
case 0x10ec0225:
case 0x10ec0295:
+ case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3962,6 +3972,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -3989,6 +4000,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0295:
+ case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
}
@@ -4052,6 +4064,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4079,6 +4092,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0295:
+ case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
}
@@ -4119,6 +4133,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4163,6 +4178,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0295:
+ case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
msleep(800);
val = alc_read_coef_idx(codec, 0x46);
@@ -4320,6 +4336,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, alc255fw);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, alc256fw);
break;
@@ -4387,7 +4404,7 @@ static void alc_no_shutup(struct hda_codec *codec)
static void alc_fixup_no_shutup(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ if (action == HDA_FIXUP_ACT_PROBE) {
struct alc_spec *spec = codec->spec;
spec->shutup = alc_no_shutup;
}
@@ -5834,6 +5851,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x1b, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170150},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -6208,6 +6233,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
+ case 0x10ec0236:
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
@@ -6215,6 +6241,7 @@ static int patch_alc269(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0295:
+ case 0x10ec0299:
spec->codec_variant = ALC269_TYPE_ALC225;
break;
case 0x10ec0234:
@@ -6227,7 +6254,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0703:
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
- alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
break;
}
@@ -7147,6 +7174,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
@@ -7172,6 +7200,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index af83b3b38052..8e457ea27f89 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -269,12 +269,12 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
/* Transfer using pseudo-dma.
*/
- if (offset + count > pipe->buffer_bytes) {
+ if (offset + count >= pipe->buffer_bytes) {
int length = pipe->buffer_bytes - offset;
count -= length;
length >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
- while (length-- > 0) {
+ for (; length > 0; length--) {
outl(cpu_to_le32(*addr), port);
addr++;
}
@@ -284,7 +284,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
pipe->hw_ptr += count;
count >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
- while (count-- > 0) {
+ for (; count > 0; count--) {
outl(cpu_to_le32(*addr), port);
addr++;
}
@@ -307,12 +307,12 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
vx2_setup_pseudo_dma(chip, 0);
/* Transfer using pseudo-dma.
*/
- if (offset + count > pipe->buffer_bytes) {
+ if (offset + count >= pipe->buffer_bytes) {
int length = pipe->buffer_bytes - offset;
count -= length;
length >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
- while (length-- > 0)
+ for (; length > 0; length--)
*addr++ = le32_to_cpu(inl(port));
addr = (u32 *)runtime->dma_area;
pipe->hw_ptr = 0;
@@ -320,7 +320,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
pipe->hw_ptr += count;
count >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
- while (count-- > 0)
+ for (; count > 0; count--)
*addr++ = le32_to_cpu(inl(port));
vx2_release_pseudo_dma(chip);
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 281972913c32..56aa1ba73ccc 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -369,12 +369,12 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
unsigned short *addr = (unsigned short *)(runtime->dma_area + offset);
vx_setup_pseudo_dma(chip, 1);
- if (offset + count > pipe->buffer_bytes) {
+ if (offset + count >= pipe->buffer_bytes) {
int length = pipe->buffer_bytes - offset;
count -= length;
length >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
- while (length-- > 0) {
+ for (; length > 0; length--) {
outw(cpu_to_le16(*addr), port);
addr++;
}
@@ -384,7 +384,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
pipe->hw_ptr += count;
count >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
- while (count-- > 0) {
+ for (; count > 0; count--) {
outw(cpu_to_le16(*addr), port);
addr++;
}
@@ -411,12 +411,12 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
if (snd_BUG_ON(count % 2))
return;
vx_setup_pseudo_dma(chip, 0);
- if (offset + count > pipe->buffer_bytes) {
+ if (offset + count >= pipe->buffer_bytes) {
int length = pipe->buffer_bytes - offset;
count -= length;
length >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
- while (length-- > 0)
+ for (; length > 0; length--)
*addr++ = le16_to_cpu(inw(port));
addr = (unsigned short *)runtime->dma_area;
pipe->hw_ptr = 0;
@@ -424,7 +424,7 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
pipe->hw_ptr += count;
count >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
- while (count-- > 1)
+ for (; count > 1; count--)
*addr++ = le16_to_cpu(inw(port));
/* Disable DMA */
pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK;
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index fcf05b254ecd..0b9e13eb0a0a 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -89,6 +89,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
+
+ /*
+ * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
+ * avoid losing SNR (workaround from ADI). This must be done after
+ * the ADC(s) have been enabled. According to the data sheet, it is
+ * normally illegal to set this bit when the sampling rate is 96 kHz,
+ * but according to ADI it is acceptable for this workaround.
+ */
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, 0);
+
+ return 0;
+}
+
static const char * const adau17x1_mono_stereo_text[] = {
"Stereo",
"Mono Left Channel (L+R)",
@@ -120,7 +141,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
&adau17x1_dac_mode_mux),
- SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
+ SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
+ adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index e13583e6ff56..6b46461cdc03 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -123,5 +123,7 @@ bool adau17x1_has_dsp(struct adau *adau);
#define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7
+#define ADAU17X1_CONVERTER0_ADOSR BIT(3)
+
#endif
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 6098a49b5c7c..43f00dcff7af 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -86,6 +86,12 @@ static int msm_digcdc_clock_control(bool flag)
if (flag) {
mutex_lock(&pdata->cdc_int_mclk0_mutex);
if (atomic_read(&pdata->int_mclk0_enabled) == false) {
+ if (pdata->native_clk_set)
+ pdata->digital_cdc_core_clk.clk_freq_in_hz =
+ NATIVE_MCLK_RATE;
+ else
+ pdata->digital_cdc_core_clk.clk_freq_in_hz =
+ DEFAULT_MCLK_RATE;
pdata->digital_cdc_core_clk.enable = 1;
ret = afe_set_lpass_clock_v2(
AFE_PORT_ID_INT0_MI2S_RX,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 0bb415a28723..f1f990b325ad 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1060,7 +1060,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
const struct wmfw_region *region;
const struct wm_adsp_region *mem;
const char *region_name;
- char *file, *text;
+ char *file, *text = NULL;
struct wm_adsp_buf *buf;
unsigned int reg;
int regions = 0;
@@ -1221,10 +1221,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
regions, le32_to_cpu(region->len), offset,
region_name);
+ if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+ firmware->size) {
+ adsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, regions, region_name,
+ le32_to_cpu(region->len), firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
if (text) {
memcpy(text, region->data, le32_to_cpu(region->len));
adsp_info(dsp, "%s: %s\n", file, text);
kfree(text);
+ text = NULL;
}
if (reg) {
@@ -1269,6 +1280,7 @@ out_fw:
regmap_async_complete(regmap);
wm_adsp_buf_free(&buf_list);
release_firmware(firmware);
+ kfree(text);
out:
kfree(file);
@@ -1730,6 +1742,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
}
if (reg) {
+ if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+ firmware->size) {
+ adsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, blocks, region_name,
+ le32_to_cpu(blk->len),
+ firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
buf = wm_adsp_buf_alloc(blk->data,
le32_to_cpu(blk->len),
&buf_list);
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index 010dfa3322a0..49a70a7395ac 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -41,12 +41,16 @@
#define DRV_NAME "msm8996-asoc-snd"
#define SAMPLING_RATE_8KHZ 8000
+#define SAMPLING_RATE_11P025KHZ 11025
#define SAMPLING_RATE_16KHZ 16000
+#define SAMPLING_RATE_22P05KHZ 22050
#define SAMPLING_RATE_32KHZ 32000
+#define SAMPLING_RATE_44P1KHZ 44100
#define SAMPLING_RATE_48KHZ 48000
+#define SAMPLING_RATE_88P2KHZ 88200
#define SAMPLING_RATE_96KHZ 96000
+#define SAMPLING_RATE_176P4KHZ 176400
#define SAMPLING_RATE_192KHZ 192000
-#define SAMPLING_RATE_44P1KHZ 44100
#define MSM8996_SPK_ON 1
#define MSM8996_HIFI_ON 1
@@ -73,6 +77,249 @@ static int slim5_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int slim6_rx_sample_rate = SAMPLING_RATE_48KHZ;
static int slim6_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+/* TDM default channels */
+static int msm_pri_tdm_tx_0_ch = 2;
+static int msm_pri_tdm_tx_1_ch = 2;
+static int msm_pri_tdm_tx_2_ch = 2;
+static int msm_pri_tdm_tx_3_ch = 2;
+
+static int msm_pri_tdm_rx_0_ch = 2;
+static int msm_pri_tdm_rx_1_ch = 2;
+static int msm_pri_tdm_rx_2_ch = 2;
+static int msm_pri_tdm_rx_3_ch = 2;
+
+static int msm_sec_tdm_tx_0_ch = 2;
+static int msm_sec_tdm_tx_1_ch = 2;
+static int msm_sec_tdm_tx_2_ch = 2;
+static int msm_sec_tdm_tx_3_ch = 2;
+
+static int msm_sec_tdm_rx_0_ch = 2;
+static int msm_sec_tdm_rx_1_ch = 2;
+static int msm_sec_tdm_rx_2_ch = 2;
+static int msm_sec_tdm_rx_3_ch = 2;
+
+static int msm_tert_tdm_rx_0_ch = 2;
+static int msm_tert_tdm_rx_1_ch = 2;
+static int msm_tert_tdm_rx_2_ch = 2;
+static int msm_tert_tdm_rx_3_ch = 2;
+
+static int msm_tert_tdm_tx_0_ch = 2;
+static int msm_tert_tdm_tx_1_ch = 2;
+static int msm_tert_tdm_tx_2_ch = 2;
+static int msm_tert_tdm_tx_3_ch = 2;
+
+static int msm_quat_tdm_rx_0_ch = 2;
+static int msm_quat_tdm_rx_1_ch = 2;
+static int msm_quat_tdm_rx_2_ch = 2;
+static int msm_quat_tdm_rx_3_ch = 2;
+
+static int msm_quat_tdm_tx_0_ch = 2;
+static int msm_quat_tdm_tx_1_ch = 2;
+static int msm_quat_tdm_tx_2_ch = 2;
+static int msm_quat_tdm_tx_3_ch = 2;
+
+/* TDM default bit format */
+static int msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_sec_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_tert_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_tert_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_tert_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_tert_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_tert_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_tert_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_tert_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_tert_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_quat_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_quat_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_quat_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_quat_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_quat_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_quat_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_quat_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_quat_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_pri_tdm_rate = SAMPLING_RATE_48KHZ;
+static int msm_pri_tdm_slot_width = 32;
+static int msm_pri_tdm_slot_num = 8;
+static int msm_sec_tdm_rate = SAMPLING_RATE_48KHZ;
+static int msm_sec_tdm_slot_width = 32;
+static int msm_sec_tdm_slot_num = 8;
+static int msm_quat_tdm_rate = SAMPLING_RATE_48KHZ;
+static int msm_quat_tdm_slot_width = 32;
+static int msm_quat_tdm_slot_num = 8;
+static int msm_tert_tdm_rate = SAMPLING_RATE_48KHZ;
+static int msm_tert_tdm_slot_width = 32;
+static int msm_tert_tdm_slot_num = 8;
+
+static int msm_tdm_slot_width = 32;
+static int msm_tdm_num_slots = 8;
+
+enum {
+ QUATERNARY_TDM_RX_0,
+ QUATERNARY_TDM_RX_1,
+ QUATERNARY_TDM_RX_2,
+ QUATERNARY_TDM_RX_3,
+ QUATERNARY_TDM_RX_4,
+ QUATERNARY_TDM_RX_5,
+ QUATERNARY_TDM_RX_6,
+ QUATERNARY_TDM_RX_7,
+ QUATERNARY_TDM_TX_0,
+ QUATERNARY_TDM_TX_1,
+ QUATERNARY_TDM_TX_2,
+ QUATERNARY_TDM_TX_3,
+ QUATERNARY_TDM_TX_4,
+ QUATERNARY_TDM_TX_5,
+ QUATERNARY_TDM_TX_6,
+ QUATERNARY_TDM_TX_7,
+ TERTIARY_TDM_RX_0,
+ TERTIARY_TDM_RX_1,
+ TERTIARY_TDM_RX_2,
+ TERTIARY_TDM_RX_3,
+ TERTIARY_TDM_RX_4,
+ TERTIARY_TDM_RX_5,
+ TERTIARY_TDM_RX_6,
+ TERTIARY_TDM_RX_7,
+ TERTIARY_TDM_TX_0,
+ TERTIARY_TDM_TX_1,
+ TERTIARY_TDM_TX_2,
+ TERTIARY_TDM_TX_3,
+ TERTIARY_TDM_TX_4,
+ TERTIARY_TDM_TX_5,
+ TERTIARY_TDM_TX_6,
+ TERTIARY_TDM_TX_7,
+ SECONDARY_TDM_RX_0,
+ SECONDARY_TDM_RX_1,
+ SECONDARY_TDM_RX_2,
+ SECONDARY_TDM_RX_3,
+ SECONDARY_TDM_RX_4,
+ SECONDARY_TDM_RX_5,
+ SECONDARY_TDM_RX_6,
+ SECONDARY_TDM_RX_7,
+ SECONDARY_TDM_TX_0,
+ SECONDARY_TDM_TX_1,
+ SECONDARY_TDM_TX_2,
+ SECONDARY_TDM_TX_3,
+ SECONDARY_TDM_TX_4,
+ SECONDARY_TDM_TX_5,
+ SECONDARY_TDM_TX_6,
+ SECONDARY_TDM_TX_7,
+ PRIMARY_TDM_RX_0,
+ PRIMARY_TDM_RX_1,
+ PRIMARY_TDM_RX_2,
+ PRIMARY_TDM_RX_3,
+ PRIMARY_TDM_RX_4,
+ PRIMARY_TDM_RX_5,
+ PRIMARY_TDM_RX_6,
+ PRIMARY_TDM_RX_7,
+ PRIMARY_TDM_TX_0,
+ PRIMARY_TDM_TX_1,
+ PRIMARY_TDM_TX_2,
+ PRIMARY_TDM_TX_3,
+ PRIMARY_TDM_TX_4,
+ PRIMARY_TDM_TX_5,
+ PRIMARY_TDM_TX_6,
+ PRIMARY_TDM_TX_7,
+ TDM_MAX,
+};
+
+#define TDM_SLOT_OFFSET_MAX 8
+/* TDM default offset */
+static unsigned int tdm_slot_offset[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
+ /* QUAT_TDM_RX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* QUAT_TDM_TX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* TERT_TDM_RX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* TERT_TDM_TX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* SEC_TDM_RX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* SEC_TDM_TX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* PRI_TDM_RX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* PRI_TDM_TX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+};
+
static struct platform_device *spdev;
static int ext_us_amp_gpio = -1;
static int msm8996_spk_control = 1;
@@ -104,6 +351,7 @@ static const char *const vi_feed_ch_text[] = {"One", "Two"};
static char const *hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five",
"Six", "Seven", "Eight"};
static char const *rx_bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"};
+static char const *usb_bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"};
static char const *slim5_rx_bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"};
static char const *slim6_rx_bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"};
static char const *slim0_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
@@ -118,12 +366,55 @@ static const char *const proxy_rx_ch_text[] = {"One", "Two", "Three", "Four",
static char const *hdmi_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
"KHZ_192"};
+static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
+ "Five", "Six", "Seven", "Eight"};
+
+static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE"};
+static const char *const tdm_rate_text[] = {"8000", "16000", "48000"};
+
+static const char *const tdm_slot_num_text[] = {"One", "Two", "Four",
+ "Eight", "Sixteen", "Thirtytwo"};
+
+
+static const char *const tdm_slot_width_text[] = {"16", "24", "32"};
+
+static const char *const usb_ch_text[] = {"One", "Two", "Three", "Four",
+ "Five", "Six", "Seven", "Eight"};
+static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
+ "KHZ_16", "KHZ_22P05", "KHZ_32",
+ "KHZ_44P1", "KHZ_48", "KHZ_88P2",
+ "KHZ_96", "KHZ_176P4", "KHZ_192"};
+
+static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_chs, usb_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_chs, usb_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, usb_bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, usb_bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
static const char *const auxpcm_rate_text[] = {"8000", "16000"};
static const struct soc_enum msm8996_auxpcm_enum[] = {
SOC_ENUM_SINGLE_EXT(2, auxpcm_rate_text),
};
+struct usb_be_config {
+ u32 sample_rate;
+ u32 bit_format;
+ u32 channels;
+};
+
+static struct usb_be_config usb_rx_cfg = {
+ .sample_rate = SAMPLING_RATE_48KHZ,
+ .bit_format = SNDRV_PCM_FORMAT_S16_LE,
+ .channels = 2,
+};
+
+static struct usb_be_config usb_tx_cfg = {
+ .sample_rate = SAMPLING_RATE_48KHZ,
+ .bit_format = SNDRV_PCM_FORMAT_S16_LE,
+ .channels = 2,
+};
+
static struct afe_clk_set mi2s_tx_clk = {
AFE_API_VERSION_I2S_CONFIG,
Q6AFE_LPASS_CLK_ID_TER_MI2S_IBIT,
@@ -1256,6 +1547,328 @@ static int hdmi_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int usb_audio_rx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: usb_audio_rx_ch = %d\n", __func__,
+ usb_rx_cfg.channels);
+ ucontrol->value.integer.value[0] = usb_rx_cfg.channels - 1;
+ return 0;
+}
+
+static int usb_audio_rx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ usb_rx_cfg.channels = ucontrol->value.integer.value[0] + 1;
+
+ pr_debug("%s: usb_audio_rx_ch = %d\n", __func__, usb_rx_cfg.channels);
+ return 1;
+}
+
+static int usb_audio_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int sample_rate_val = 0;
+
+ switch (usb_rx_cfg.sample_rate) {
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 10;
+ break;
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 9;
+ break;
+ case SAMPLING_RATE_96KHZ:
+ sample_rate_val = 8;
+ break;
+ case SAMPLING_RATE_88P2KHZ:
+ sample_rate_val = 7;
+ break;
+ case SAMPLING_RATE_48KHZ:
+ sample_rate_val = 6;
+ break;
+ case SAMPLING_RATE_44P1KHZ:
+ sample_rate_val = 5;
+ break;
+ case SAMPLING_RATE_32KHZ:
+ sample_rate_val = 4;
+ break;
+ case SAMPLING_RATE_22P05KHZ:
+ sample_rate_val = 3;
+ break;
+ case SAMPLING_RATE_16KHZ:
+ sample_rate_val = 2;
+ break;
+ case SAMPLING_RATE_11P025KHZ:
+ sample_rate_val = 1;
+ break;
+ case SAMPLING_RATE_8KHZ:
+ default:
+ sample_rate_val = 0;
+ break;
+ }
+
+ ucontrol->value.integer.value[0] = sample_rate_val;
+ pr_debug("%s: usb_audio_rx_sample_rate = %d\n", __func__,
+ usb_rx_cfg.sample_rate);
+ return 0;
+}
+
+static int usb_audio_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 10:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_192KHZ;
+ break;
+ case 9:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 8:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 7:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_88P2KHZ;
+ break;
+ case 6:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ case 5:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 4:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_32KHZ;
+ break;
+ case 3:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_22P05KHZ;
+ break;
+ case 2:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 1:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_11P025KHZ;
+ break;
+ case 0:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_8KHZ;
+ break;
+ default:
+ usb_rx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+
+ pr_debug("%s: control value = %ld, usb_audio_rx_sample_rate = %d\n",
+ __func__, ucontrol->value.integer.value[0],
+ usb_rx_cfg.sample_rate);
+ return 0;
+}
+
+static int usb_audio_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (usb_rx_cfg.bit_format) {
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+
+ pr_debug("%s: usb_audio_rx_format = %d, ucontrol value = %ld\n",
+ __func__, usb_rx_cfg.bit_format,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int usb_audio_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int rc = 0;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 1:
+ usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: usb_audio_rx_format = %d, ucontrol value = %ld\n",
+ __func__, usb_rx_cfg.bit_format,
+ ucontrol->value.integer.value[0]);
+
+ return rc;
+}
+
+static int usb_audio_tx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: usb_audio_tx_ch = %d\n", __func__,
+ usb_tx_cfg.channels);
+ ucontrol->value.integer.value[0] = usb_tx_cfg.channels - 1;
+ return 0;
+}
+
+static int usb_audio_tx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ usb_tx_cfg.channels = ucontrol->value.integer.value[0] + 1;
+
+ pr_debug("%s: usb_audio_tx_ch = %d\n", __func__, usb_tx_cfg.channels);
+ return 1;
+}
+
+static int usb_audio_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int sample_rate_val = 0;
+
+ switch (usb_tx_cfg.sample_rate) {
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 10;
+ break;
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 9;
+ break;
+ case SAMPLING_RATE_96KHZ:
+ sample_rate_val = 8;
+ break;
+ case SAMPLING_RATE_88P2KHZ:
+ sample_rate_val = 7;
+ break;
+ case SAMPLING_RATE_48KHZ:
+ sample_rate_val = 6;
+ break;
+ case SAMPLING_RATE_44P1KHZ:
+ sample_rate_val = 5;
+ break;
+ case SAMPLING_RATE_32KHZ:
+ sample_rate_val = 4;
+ break;
+ case SAMPLING_RATE_22P05KHZ:
+ sample_rate_val = 3;
+ break;
+ case SAMPLING_RATE_16KHZ:
+ sample_rate_val = 2;
+ break;
+ case SAMPLING_RATE_11P025KHZ:
+ sample_rate_val = 1;
+ break;
+ case SAMPLING_RATE_8KHZ:
+ sample_rate_val = 0;
+ break;
+ default:
+ sample_rate_val = 6;
+ break;
+ }
+
+ ucontrol->value.integer.value[0] = sample_rate_val;
+ pr_debug("%s: usb_audio_tx_sample_rate = %d\n", __func__,
+ usb_tx_cfg.sample_rate);
+ return 0;
+}
+
+static int usb_audio_tx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 10:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_192KHZ;
+ break;
+ case 9:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 8:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 7:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_88P2KHZ;
+ break;
+ case 6:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ case 5:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 4:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_32KHZ;
+ break;
+ case 3:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_22P05KHZ;
+ break;
+ case 2:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 1:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_11P025KHZ;
+ break;
+ case 0:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_8KHZ;
+ break;
+ default:
+ usb_tx_cfg.sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+
+ pr_debug("%s: control value = %ld, usb_audio_tx_sample_rate = %d\n",
+ __func__, ucontrol->value.integer.value[0],
+ usb_tx_cfg.sample_rate);
+ return 0;
+}
+
+static int usb_audio_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (usb_tx_cfg.bit_format) {
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+
+ pr_debug("%s: usb_audio_tx_format = %d, ucontrol value = %ld\n",
+ __func__, usb_tx_cfg.bit_format,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int rc = 0;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 1:
+ usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: usb_audio_tx_format = %d, ucontrol value = %ld\n",
+ __func__, usb_tx_cfg.bit_format,
+ ucontrol->value.integer.value[0]);
+
+ return rc;
+}
+
static int msm8996_auxpcm_rate_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1296,6 +1909,2179 @@ static int msm_proxy_rx_ch_put(struct snd_kcontrol *kcontrol,
return 1;
}
+static int msm_pri_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_tx_0_ch = %d\n", __func__,
+ msm_pri_tdm_tx_0_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_0_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_0_ch = %d\n", __func__,
+ msm_pri_tdm_tx_0_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: pri_tdm_tx_1_ch = %d\n", __func__,
+ msm_pri_tdm_tx_1_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_1_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_1_ch = %d\n", __func__,
+ msm_pri_tdm_tx_1_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_tx_2_ch = %d\n", __func__,
+ msm_pri_tdm_tx_2_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_2_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_2_ch = %d\n", __func__,
+ msm_pri_tdm_tx_2_ch);
+ return 0;
+}
+static int msm_pri_tdm_tx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_tx_3_ch = %d\n", __func__,
+ msm_pri_tdm_tx_3_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_3_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_3_ch = %d\n", __func__,
+ msm_pri_tdm_tx_3_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_0_ch = %d\n", __func__,
+ msm_pri_tdm_rx_0_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_0_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_0_ch = %d\n", __func__,
+ msm_pri_tdm_rx_0_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_1_ch = %d\n", __func__,
+ msm_pri_tdm_rx_1_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_1_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_1_ch = %d\n", __func__,
+ msm_pri_tdm_rx_1_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_2_ch = %d\n", __func__,
+ msm_pri_tdm_rx_2_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_2_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_2_ch = %d\n", __func__,
+ msm_pri_tdm_rx_2_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_3_ch = %d\n", __func__,
+ msm_pri_tdm_rx_3_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_3_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_3_ch = %d\n", __func__,
+ msm_pri_tdm_rx_3_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rate;
+ pr_debug("%s: msm_pri_tdm_rate = %d\n", __func__, msm_pri_tdm_rate);
+ return 0;
+}
+static int msm_pri_tdm_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_pri_tdm_rate = SAMPLING_RATE_8KHZ;
+ break;
+ case 1:
+ msm_pri_tdm_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ default:
+ msm_pri_tdm_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rate = %d\n",
+ __func__, msm_pri_tdm_rate);
+ return 0;
+}
+
+static int msm_sec_tdm_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rate;
+ pr_debug("%s: msm_sec_tdm_rate = %d\n", __func__, msm_sec_tdm_rate);
+ return 0;
+}
+
+static int msm_sec_tdm_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_sec_tdm_rate = SAMPLING_RATE_8KHZ;
+ break;
+ case 1:
+ msm_sec_tdm_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ default:
+ msm_sec_tdm_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rate = %d\n",
+ __func__, msm_sec_tdm_rate);
+ return 0;
+}
+
+static int msm_tert_tdm_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_tert_tdm_rate;
+ pr_debug("%s: msm_tert_tdm_rate = %d\n", __func__, msm_tert_tdm_rate);
+ return 0;
+}
+
+static int msm_tert_tdm_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_tert_tdm_rate = SAMPLING_RATE_8KHZ;
+ break;
+ case 1:
+ msm_tert_tdm_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ default:
+ msm_tert_tdm_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rate = %d\n",
+ __func__, msm_tert_tdm_rate);
+ return 0;
+}
+
+static int msm_quat_tdm_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_quat_tdm_rate;
+ pr_debug("%s: msm_quat_tdm_rate = %d\n", __func__, msm_quat_tdm_rate);
+ return 0;
+}
+static int msm_quat_tdm_rate_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Translate the mixer enum index into a sampling rate:
+	 * 0 -> 8 kHz, 1 -> 16 kHz, anything else -> 48 kHz.
+	 */
+	long sel = ucontrol->value.integer.value[0];
+
+	if (sel == 0)
+		msm_quat_tdm_rate = SAMPLING_RATE_8KHZ;
+	else if (sel == 1)
+		msm_quat_tdm_rate = SAMPLING_RATE_16KHZ;
+	else
+		msm_quat_tdm_rate = SAMPLING_RATE_48KHZ;
+
+	pr_debug("%s: msm_quat_tdm_rate = %d\n",
+		__func__, msm_quat_tdm_rate);
+	return 0;
+}
+
+static int msm_pri_tdm_slot_width_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/* Report the currently cached primary TDM slot width (bits). */
+	pr_debug("%s: msm_pri_tdm_slot_width = %d\n",
+		__func__, msm_pri_tdm_slot_width);
+	ucontrol->value.integer.value[0] = msm_pri_tdm_slot_width;
+	return 0;
+}
+
+static int msm_pri_tdm_slot_width_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index to a slot width in bits:
+	 * 0 -> 16, 1 -> 24, anything else -> 32.
+	 */
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_pri_tdm_slot_width = 16;
+		break;
+	case 1:
+		msm_pri_tdm_slot_width = 24;
+		break;
+	case 2:
+	default:
+		msm_pri_tdm_slot_width = 32;
+		break;
+	}
+	/* Trace fixed: was "slot_width= %d" (missing space before '='). */
+	pr_debug("%s: msm_pri_tdm_slot_width = %d\n",
+		__func__, msm_pri_tdm_slot_width);
+	return 0;
+}
+
+static int msm_sec_tdm_slot_width_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/* Report the currently cached secondary TDM slot width (bits). */
+	pr_debug("%s: msm_sec_tdm_slot_width = %d\n",
+		__func__, msm_sec_tdm_slot_width);
+	ucontrol->value.integer.value[0] = msm_sec_tdm_slot_width;
+	return 0;
+}
+
+static int msm_sec_tdm_slot_width_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index to a slot width in bits:
+	 * 0 -> 16, 1 -> 24, anything else -> 32.
+	 */
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_sec_tdm_slot_width = 16;
+		break;
+	case 1:
+		msm_sec_tdm_slot_width = 24;
+		break;
+	case 2:
+	default:
+		msm_sec_tdm_slot_width = 32;
+		break;
+	}
+	/* Trace fixed: was "slot_width= %d" (missing space before '='). */
+	pr_debug("%s: msm_sec_tdm_slot_width = %d\n",
+		__func__, msm_sec_tdm_slot_width);
+	return 0;
+}
+
+static int msm_tert_tdm_slot_width_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/* Report the currently cached tertiary TDM slot width (bits). */
+	pr_debug("%s: msm_tert_tdm_slot_width = %d\n",
+		__func__, msm_tert_tdm_slot_width);
+	ucontrol->value.integer.value[0] = msm_tert_tdm_slot_width;
+	return 0;
+}
+
+static int msm_tert_tdm_slot_width_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index to a slot width in bits:
+	 * 0 -> 16, 1 -> 24, anything else -> 32.
+	 */
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_tert_tdm_slot_width = 16;
+		break;
+	case 1:
+		msm_tert_tdm_slot_width = 24;
+		break;
+	case 2:
+	default:
+		msm_tert_tdm_slot_width = 32;
+		break;
+	}
+	/* Trace fixed: was "slot_width= %d" (missing space before '='). */
+	pr_debug("%s: msm_tert_tdm_slot_width = %d\n",
+		__func__, msm_tert_tdm_slot_width);
+	return 0;
+}
+
+static int msm_quat_tdm_slot_width_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/* Report the currently cached quaternary TDM slot width (bits). */
+	pr_debug("%s: msm_quat_tdm_slot_width = %d\n",
+		__func__, msm_quat_tdm_slot_width);
+	ucontrol->value.integer.value[0] = msm_quat_tdm_slot_width;
+	return 0;
+}
+
+static int msm_quat_tdm_slot_width_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index to a slot width in bits:
+	 * 0 -> 16, 1 -> 24, anything else -> 32.
+	 */
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		msm_quat_tdm_slot_width = 16;
+		break;
+	case 1:
+		msm_quat_tdm_slot_width = 24;
+		break;
+	case 2:
+	default:
+		msm_quat_tdm_slot_width = 32;
+		break;
+	}
+	/* Trace fixed: was "slot_width= %d" (missing space before '='). */
+	pr_debug("%s: msm_quat_tdm_slot_width = %d\n",
+		__func__, msm_quat_tdm_slot_width);
+	return 0;
+}
+
+
+static int msm_pri_tdm_slot_num_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Translate the configured slot count (1/2/4/8/16/32) back into
+	 * its mixer enum index; any other count reports index 5.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	int idx;
+
+	for (idx = 0; idx < 5; idx++) {
+		if (slot_counts[idx] == msm_pri_tdm_slot_num)
+			break;
+	}
+	ucontrol->value.integer.value[0] = idx;
+
+	pr_debug("%s: msm_pri_tdm_slot_num = %d\n",
+		__func__, msm_pri_tdm_slot_num);
+	return 0;
+}
+
+static int msm_pri_tdm_slot_num_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index onto a slot count (1/2/4/8/16/32);
+	 * out-of-range selections fall back to 8 slots.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	long sel = ucontrol->value.integer.value[0];
+
+	if (sel >= 0 && sel < 6)
+		msm_pri_tdm_slot_num = slot_counts[sel];
+	else
+		msm_pri_tdm_slot_num = 8;
+	pr_debug("%s: msm_pri_tdm_slot_num = %d\n",
+		__func__, msm_pri_tdm_slot_num);
+	return 0;
+}
+
+static int msm_sec_tdm_slot_num_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Translate the configured slot count (1/2/4/8/16/32) back into
+	 * its mixer enum index; any other count reports index 5.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	int idx;
+
+	for (idx = 0; idx < 5; idx++) {
+		if (slot_counts[idx] == msm_sec_tdm_slot_num)
+			break;
+	}
+	ucontrol->value.integer.value[0] = idx;
+
+	pr_debug("%s: msm_sec_tdm_slot_num = %d\n",
+		__func__, msm_sec_tdm_slot_num);
+	return 0;
+}
+
+static int msm_sec_tdm_slot_num_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index onto a slot count (1/2/4/8/16/32);
+	 * out-of-range selections fall back to 8 slots.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	long sel = ucontrol->value.integer.value[0];
+
+	if (sel >= 0 && sel < 6)
+		msm_sec_tdm_slot_num = slot_counts[sel];
+	else
+		msm_sec_tdm_slot_num = 8;
+	pr_debug("%s: msm_sec_tdm_slot_num = %d\n",
+		__func__, msm_sec_tdm_slot_num);
+	return 0;
+}
+
+static int msm_tert_tdm_slot_num_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Translate the configured slot count (1/2/4/8/16/32) back into
+	 * its mixer enum index; any other count reports index 5.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	int idx;
+
+	for (idx = 0; idx < 5; idx++) {
+		if (slot_counts[idx] == msm_tert_tdm_slot_num)
+			break;
+	}
+	ucontrol->value.integer.value[0] = idx;
+
+	pr_debug("%s: msm_tert_tdm_slot_num = %d\n",
+		__func__, msm_tert_tdm_slot_num);
+	return 0;
+}
+
+static int msm_tert_tdm_slot_num_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index onto a slot count (1/2/4/8/16/32);
+	 * out-of-range selections fall back to 8 slots.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	long sel = ucontrol->value.integer.value[0];
+
+	if (sel >= 0 && sel < 6)
+		msm_tert_tdm_slot_num = slot_counts[sel];
+	else
+		msm_tert_tdm_slot_num = 8;
+	pr_debug("%s: msm_tert_tdm_slot_num = %d\n",
+		__func__, msm_tert_tdm_slot_num);
+	return 0;
+}
+
+static int msm_quat_tdm_slot_num_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Translate the configured slot count (1/2/4/8/16/32) back into
+	 * its mixer enum index; any other count reports index 5.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	int idx;
+
+	for (idx = 0; idx < 5; idx++) {
+		if (slot_counts[idx] == msm_quat_tdm_slot_num)
+			break;
+	}
+	ucontrol->value.integer.value[0] = idx;
+
+	pr_debug("%s: msm_quat_tdm_slot_num = %d\n",
+		__func__, msm_quat_tdm_slot_num);
+	return 0;
+}
+static int msm_quat_tdm_slot_num_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Map the mixer enum index onto a slot count (1/2/4/8/16/32);
+	 * out-of-range selections fall back to 8 slots.
+	 */
+	static const unsigned int slot_counts[] = {1, 2, 4, 8, 16, 32};
+	long sel = ucontrol->value.integer.value[0];
+
+	if (sel >= 0 && sel < 6)
+		msm_quat_tdm_slot_num = slot_counts[sel];
+	else
+		msm_quat_tdm_slot_num = 8;
+	pr_debug("%s: msm_quat_tdm_slot_num = %d\n",
+		__func__, msm_quat_tdm_slot_num);
+	return 0;
+}
+
+/*
+ * Read back the per-slot offset table for one TDM port. mc->shift selects
+ * the port (row in tdm_slot_offset[]); the row length is assumed to be at
+ * least TDM_SLOT_OFFSET_MAX entries -- confirm against the array definition
+ * elsewhere in this file.
+ */
+static int msm_tdm_slot_mapping_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_multi_mixer_control *mc =
+ (struct soc_multi_mixer_control *)kcontrol->private_value;
+ unsigned int *slot_offset;
+ int i;
+
+ /* Reject port indices beyond the tdm_slot_offset table. */
+ if (mc->shift >= TDM_MAX) {
+ pr_err("%s invalid port index %d\n", __func__, mc->shift);
+ return -EINVAL;
+ }
+
+ slot_offset = tdm_slot_offset[mc->shift];
+ for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) {
+ ucontrol->value.integer.value[i] = slot_offset[i];
+ pr_debug("%s port index %d offset %d value %d\n",
+ __func__, mc->shift, i, slot_offset[i]);
+ }
+
+ return 0;
+}
+
+/*
+ * Store a new per-slot offset table for one TDM port from userspace.
+ * NOTE(review): the offset values themselves are not range-checked here;
+ * presumably the control definition's max bounds them -- confirm.
+ */
+static int msm_tdm_slot_mapping_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_multi_mixer_control *mc =
+ (struct soc_multi_mixer_control *)kcontrol->private_value;
+ unsigned int *slot_offset;
+ int i;
+
+ /* Reject port indices beyond the tdm_slot_offset table. */
+ if (mc->shift >= TDM_MAX) {
+ pr_err("%s invalid port index %d\n", __func__, mc->shift);
+ return -EINVAL;
+ }
+
+ slot_offset = tdm_slot_offset[mc->shift];
+
+ for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) {
+ slot_offset[i] = ucontrol->value.integer.value[i];
+ pr_debug("%s port index %d offset %d value %d\n",
+ __func__, mc->shift, i, slot_offset[i]);
+ }
+
+ return 0;
+}
+
+/*
+ * Secondary TDM RX/TX channel-count mixer controls (streams 0-3).
+ * The channel count is stored 1-based in a file-scope variable but exposed
+ * to userspace 0-based, hence the -1 on get and +1 on put. The cached value
+ * is presumably consumed by the TDM hw_params handlers -- confirm elsewhere
+ * in this file.
+ */
+static int msm_sec_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_0_ch = %d\n", __func__,
+ msm_sec_tdm_rx_0_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_0_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_0_ch = %d\n", __func__,
+ msm_sec_tdm_rx_0_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_1_ch = %d\n", __func__,
+ msm_sec_tdm_rx_1_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_1_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_1_ch = %d\n", __func__,
+ msm_sec_tdm_rx_1_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_2_ch = %d\n", __func__,
+ msm_sec_tdm_rx_2_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_2_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_2_ch = %d\n", __func__,
+ msm_sec_tdm_rx_2_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_3_ch = %d\n", __func__,
+ msm_sec_tdm_rx_3_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_3_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_3_ch = %d\n", __func__,
+ msm_sec_tdm_rx_3_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_tx_0_ch = %d\n", __func__,
+ msm_sec_tdm_tx_0_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_tx_0_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_tx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_tx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_tx_0_ch = %d\n", __func__,
+ msm_sec_tdm_tx_0_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_tx_1_ch = %d\n", __func__,
+ msm_sec_tdm_tx_1_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_tx_1_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_tx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_tx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_tx_1_ch = %d\n", __func__,
+ msm_sec_tdm_tx_1_ch);
+ return 0;
+}
+static int msm_sec_tdm_tx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_tx_2_ch = %d\n", __func__,
+ msm_sec_tdm_tx_2_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_tx_2_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_tx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_tx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_tx_2_ch = %d\n", __func__,
+ msm_sec_tdm_tx_2_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_tx_3_ch = %d\n", __func__,
+ msm_sec_tdm_tx_3_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_tx_3_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_tx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_tx_3_ch = %d\n", __func__,
+ msm_sec_tdm_tx_3_ch);
+ return 0;
+}
+
+/*
+ * Tertiary TDM RX/TX channel-count mixer controls (streams 0-3).
+ * The channel count is stored 1-based in a file-scope variable but exposed
+ * to userspace 0-based, hence the -1 on get and +1 on put. The cached value
+ * is presumably consumed by the TDM hw_params handlers -- confirm elsewhere
+ * in this file.
+ */
+static int msm_tert_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_rx_0_ch = %d\n", __func__,
+ msm_tert_tdm_rx_0_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_rx_0_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_rx_0_ch = %d\n", __func__,
+ msm_tert_tdm_rx_0_ch);
+ return 0;
+}
+static int msm_tert_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_rx_1_ch = %d\n", __func__,
+ msm_tert_tdm_rx_1_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_rx_1_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_rx_1_ch = %d\n", __func__,
+ msm_tert_tdm_rx_1_ch);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_rx_2_ch = %d\n", __func__,
+ msm_tert_tdm_rx_2_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_rx_2_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_rx_2_ch = %d\n", __func__,
+ msm_tert_tdm_rx_2_ch);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_rx_3_ch = %d\n", __func__,
+ msm_tert_tdm_rx_3_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_rx_3_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_rx_3_ch = %d\n", __func__,
+ msm_tert_tdm_rx_3_ch);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_tx_0_ch = %d\n", __func__,
+ msm_tert_tdm_tx_0_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_tx_0_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_tx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_tx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_tx_0_ch = %d\n", __func__,
+ msm_tert_tdm_tx_0_ch);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_tx_1_ch = %d\n", __func__,
+ msm_tert_tdm_tx_1_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_tx_1_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_tx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_tx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_tx_1_ch = %d\n", __func__,
+ msm_tert_tdm_tx_1_ch);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_tx_2_ch = %d\n", __func__,
+ msm_tert_tdm_tx_2_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_tx_2_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_tx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_tx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_tx_2_ch = %d\n", __func__,
+ msm_tert_tdm_tx_2_ch);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_tert_tdm_tx_3_ch = %d\n", __func__,
+ msm_tert_tdm_tx_3_ch);
+ ucontrol->value.integer.value[0] = msm_tert_tdm_tx_3_ch - 1;
+ return 0;
+}
+
+static int msm_tert_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_tert_tdm_tx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_tert_tdm_tx_3_ch = %d\n", __func__,
+ msm_tert_tdm_tx_3_ch);
+ return 0;
+}
+
+/*
+ * Quaternary TDM RX/TX channel-count mixer controls (streams 0-3).
+ * The channel count is stored 1-based in a file-scope variable but exposed
+ * to userspace 0-based, hence the -1 on get and +1 on put. The cached value
+ * is presumably consumed by the TDM hw_params handlers -- confirm elsewhere
+ * in this file.
+ */
+static int msm_quat_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_rx_0_ch = %d\n", __func__,
+ msm_quat_tdm_rx_0_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_rx_0_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_rx_0_ch = %d\n", __func__,
+ msm_quat_tdm_rx_0_ch);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_rx_1_ch = %d\n", __func__,
+ msm_quat_tdm_rx_1_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_rx_1_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_rx_1_ch = %d\n", __func__,
+ msm_quat_tdm_rx_1_ch);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_rx_2_ch = %d\n", __func__,
+ msm_quat_tdm_rx_2_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_rx_2_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_rx_2_ch = %d\n", __func__,
+ msm_quat_tdm_rx_2_ch);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_rx_3_ch = %d\n", __func__,
+ msm_quat_tdm_rx_3_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_rx_3_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_rx_3_ch = %d\n", __func__,
+ msm_quat_tdm_rx_3_ch);
+ return 0;
+}
+static int msm_quat_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_tx_0_ch = %d\n", __func__,
+ msm_quat_tdm_tx_0_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_tx_0_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_tx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_tx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_tx_0_ch = %d\n", __func__,
+ msm_quat_tdm_tx_0_ch);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_tx_1_ch = %d\n", __func__,
+ msm_quat_tdm_tx_1_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_tx_1_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_tx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_tx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_tx_1_ch = %d\n", __func__,
+ msm_quat_tdm_tx_1_ch);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_tx_2_ch = %d\n", __func__,
+ msm_quat_tdm_tx_2_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_tx_2_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_tx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_tx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_tx_2_ch = %d\n", __func__,
+ msm_quat_tdm_tx_2_ch);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_quat_tdm_tx_3_ch = %d\n", __func__,
+ msm_quat_tdm_tx_3_ch);
+ ucontrol->value.integer.value[0] = msm_quat_tdm_tx_3_ch - 1;
+ return 0;
+}
+
+static int msm_quat_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_quat_tdm_tx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_quat_tdm_tx_3_ch = %d\n", __func__,
+ msm_quat_tdm_tx_3_ch);
+ return 0;
+}
+
+/*
+ * Primary TDM TX bit-format mixer controls (streams 0-3).
+ * Enum index 1 selects SNDRV_PCM_FORMAT_S24_LE; any other index (including
+ * 0) selects SNDRV_PCM_FORMAT_S16_LE. The cached format is presumably used
+ * by the TDM hw_params handlers -- confirm elsewhere in this file.
+ */
+static int msm_pri_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_0_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_0_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_1_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_1_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_2_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_2_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_3_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_3_bit_format);
+ return 0;
+}
+
+/*
+ * Primary TDM RX bit-format mixer controls (streams 0-3).
+ * Enum index 1 selects SNDRV_PCM_FORMAT_S24_LE; any other index (including
+ * 0) selects SNDRV_PCM_FORMAT_S16_LE. The cached format is presumably used
+ * by the TDM hw_params handlers -- confirm elsewhere in this file.
+ */
+static int msm_pri_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_0_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_0_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_1_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_1_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_2_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_2_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_3_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_3_bit_format);
+ return 0;
+}
+
+/*
+ * Secondary TDM RX bit-format mixer controls (streams 0-3).
+ * Enum index 1 selects SNDRV_PCM_FORMAT_S24_LE; any other index (including
+ * 0) selects SNDRV_PCM_FORMAT_S16_LE. The cached format is presumably used
+ * by the TDM hw_params handlers -- confirm elsewhere in this file.
+ */
+static int msm_sec_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_0_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_0_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_1_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_1_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_2_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_2_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_3_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_3_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_tx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_0_bit_format = %d\n",
+ __func__, msm_sec_tdm_tx_0_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_tx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_1_bit_format = %d\n",
+ __func__, msm_sec_tdm_tx_1_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_tx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+static int msm_sec_tdm_tx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_2_bit_format = %d\n",
+ __func__, msm_sec_tdm_tx_2_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_tx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_tx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_tx_3_bit_format = %d\n",
+ __func__, msm_sec_tdm_tx_3_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_rx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_0_bit_format = %d\n",
+ __func__, msm_tert_tdm_rx_0_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_rx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_1_bit_format = %d\n",
+ __func__, msm_tert_tdm_rx_1_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_rx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_2_bit_format = %d\n",
+ __func__, msm_tert_tdm_rx_2_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_rx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_rx_3_bit_format = %d\n",
+ __func__, msm_tert_tdm_rx_3_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_tx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_0_bit_format = %d\n",
+ __func__, msm_tert_tdm_tx_0_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_tx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_1_bit_format = %d\n",
+ __func__, msm_tert_tdm_tx_1_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_tx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_2_bit_format = %d\n",
+ __func__, msm_tert_tdm_tx_2_bit_format);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_tert_tdm_tx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_tert_tdm_tx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_tert_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_tert_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_tert_tdm_tx_3_bit_format = %d\n",
+ __func__, msm_tert_tdm_tx_3_bit_format);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_rx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_0_bit_format = %d\n",
+ __func__, msm_quat_tdm_rx_0_bit_format);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_rx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+static int msm_quat_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_1_bit_format = %d\n",
+ __func__, msm_quat_tdm_rx_1_bit_format);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_rx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_2_bit_format = %d\n",
+ __func__, msm_quat_tdm_rx_2_bit_format);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_rx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_quat_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_rx_3_bit_format = %d\n",
+ __func__, msm_quat_tdm_rx_3_bit_format);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_tx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+static int msm_quat_tdm_tx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_0_bit_format = %d\n",
+ __func__, msm_quat_tdm_tx_0_bit_format);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_tx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_1_bit_format = %d\n",
+ __func__, msm_quat_tdm_tx_1_bit_format);
+ return 0;
+}
+static int msm_quat_tdm_tx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_tx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_2_bit_format = %d\n",
+ __func__, msm_quat_tdm_tx_2_bit_format);
+ return 0;
+}
+
+static int msm_quat_tdm_tx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_quat_tdm_tx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+static int msm_quat_tdm_tx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_quat_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_quat_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_quat_tdm_tx_3_bit_format = %d\n",
+ __func__, msm_quat_tdm_tx_3_bit_format);
+ return 0;
+}
+
+
static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
@@ -1558,14 +4344,713 @@ static int msm_slim_5_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
+ struct snd_soc_dai_link *dai_link = rtd->dai_link;
struct snd_interval *rate = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ pr_debug("%s: format = %d, rate = %d\n",
+ __func__, params_format(params), params_rate(params));
+
+ switch (dai_link->be_id) {
+ case MSM_BACKEND_DAI_USB_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ usb_rx_cfg.bit_format);
+ rate->min = rate->max = usb_rx_cfg.sample_rate;
+ channels->min = channels->max = usb_rx_cfg.channels;
+ break;
+
+ case MSM_BACKEND_DAI_USB_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ usb_tx_cfg.bit_format);
+ rate->min = rate->max = usb_tx_cfg.sample_rate;
+ channels->min = channels->max = usb_tx_cfg.channels;
+ break;
+
+ default:
+ rate->min = rate->max = SAMPLING_RATE_48KHZ;
+ break;
+ }
+ return 0;
+}
+static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
- pr_debug("%s:\n", __func__);
rate->min = rate->max = SAMPLING_RATE_48KHZ;
+
+ switch (cpu_dai->id) {
+ case AFE_PORT_ID_PRIMARY_TDM_TX:
+ channels->min = channels->max = msm_pri_tdm_tx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_0_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+ channels->min = channels->max = msm_pri_tdm_tx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_1_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+ channels->min = channels->max = msm_pri_tdm_tx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_2_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+ channels->min = channels->max = msm_pri_tdm_tx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_3_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ channels->min = channels->max = msm_pri_tdm_rx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_0_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+ channels->min = channels->max = msm_pri_tdm_rx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_1_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+ channels->min = channels->max = msm_pri_tdm_rx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_2_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+ channels->min = channels->max = msm_pri_tdm_rx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_3_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX:
+ channels->min = channels->max = msm_sec_tdm_rx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_0_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+ channels->min = channels->max = msm_sec_tdm_rx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_1_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+ channels->min = channels->max = msm_sec_tdm_rx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_2_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+ channels->min = channels->max = msm_sec_tdm_rx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_3_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX:
+ channels->min = channels->max = msm_sec_tdm_tx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_tx_0_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+ channels->min = channels->max = msm_sec_tdm_tx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_tx_1_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+ channels->min = channels->max = msm_sec_tdm_tx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_tx_2_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+ channels->min = channels->max = msm_sec_tdm_tx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_tx_3_bit_format);
+ rate->min = rate->max = msm_sec_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX:
+ channels->min = channels->max = msm_tert_tdm_rx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_rx_0_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+ channels->min = channels->max = msm_tert_tdm_rx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_rx_1_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+ channels->min = channels->max = msm_tert_tdm_rx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_rx_2_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+ channels->min = channels->max = msm_tert_tdm_rx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_rx_3_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX:
+ channels->min = channels->max = msm_tert_tdm_tx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_tx_0_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+ channels->min = channels->max = msm_tert_tdm_tx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_tx_1_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+ channels->min = channels->max = msm_tert_tdm_tx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_tx_2_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+ channels->min = channels->max = msm_tert_tdm_tx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_tert_tdm_tx_3_bit_format);
+ rate->min = rate->max = msm_tert_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX:
+ channels->min = channels->max = msm_quat_tdm_rx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_rx_0_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+ channels->min = channels->max = msm_quat_tdm_rx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_rx_1_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+ channels->min = channels->max = msm_quat_tdm_rx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_rx_2_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+ channels->min = channels->max = msm_quat_tdm_rx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_rx_3_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX:
+ channels->min = channels->max = msm_quat_tdm_tx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_tx_0_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+ channels->min = channels->max = msm_quat_tdm_tx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_tx_1_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+ channels->min = channels->max = msm_quat_tdm_tx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_tx_2_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+ channels->min = channels->max = msm_quat_tdm_tx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_tdm_tx_3_bit_format);
+ rate->min = rate->max = msm_quat_tdm_rate;
+ break;
+ default:
+ pr_err("%s: dai id 0x%x not supported\n",
+ __func__, cpu_dai->id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+ __func__, cpu_dai->id, channels->max, rate->max,
+ params_format(params));
+
return 0;
}
/*
 * Build a dense TDM slot mask with bits 0..slots-1 set, for 8- or
 * 16-slot frames only.
 *
 * NOTE(review): the return type is unsigned, so the -EINVAL error path
 * actually yields 0xFFFFFFEA (all-but-a-few bits set) rather than a
 * negative value a caller could test with `< 0`.  Kept byte-identical
 * for interface compatibility with existing callers — confirm how the
 * call sites check for failure before changing it.
 */
static unsigned int tdm_param_set_slot_mask(int slots)
{
	if ((slots != 16) && (slots != 8)) {
		pr_err("%s: invalid slot number %d\n", __func__, slots);
		return -EINVAL;
	}

	/* Equivalent to OR-ing (1 << i) for every i in [0, slots). */
	return (1U << slots) - 1;
}
+
+static int msm8996_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+ int channels, slot_width, slots, rate;
+ unsigned int slot_mask;
+ unsigned int *slot_offset;
+ int offset_channels = 0;
+ int i;
+ int clk_freq;
+
+ pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+ rate = params_rate(params);
+ channels = params_channels(params);
+ if (channels < 1 || channels > 8) {
+ pr_err("%s: invalid param channels %d\n",
+ __func__, channels);
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S16_LE:
+ /*
+ * up to 8 channel HW configuration should
+ * use 32 bit slot width for max support of
+ * stream bit width. (slot_width > bit_width)
+ */
+ slot_width = msm_tdm_slot_width;
+ break;
+ default:
+ pr_err("%s: invalid param format 0x%x\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+
+ slots = msm_tdm_num_slots;
+
+ switch (cpu_dai->id) {
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_0];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_1];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_2];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_3];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_4];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_5];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_6];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_7];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_0];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_1];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_2];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_3];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_4];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_5];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_6];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_7];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_0];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_1];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_2];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_3];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_4];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_5];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_6];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_7];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_0];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_1:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_1];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_2:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_2];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_3:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_3];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_4:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_4];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_5:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_5];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_6:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_6];
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_TX_7:
+ slots = msm_sec_tdm_slot_num;
+ slot_width = msm_sec_tdm_slot_width;
+ slot_offset = tdm_slot_offset[SECONDARY_TDM_TX_7];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_0];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_1];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_2];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_3];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_4];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_5];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_6];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_RX_7];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_0];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_1:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_1];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_2:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_2];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_3:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_3];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_4:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_4];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_5:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_5];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_6:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_6];
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_TX_7:
+ slots = msm_tert_tdm_slot_num;
+ slot_width = msm_tert_tdm_slot_width;
+ slot_offset = tdm_slot_offset[TERTIARY_TDM_TX_7];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_0];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_1];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_2];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_3];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_4];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_5];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_6];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_RX_7];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_0];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_1];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_2];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_3];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_4];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_5];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_6];
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
+ slots = msm_quat_tdm_slot_num;
+ slot_width = msm_quat_tdm_slot_width;
+ slot_offset = tdm_slot_offset[QUATERNARY_TDM_TX_7];
+ break;
+ default:
+ pr_err("%s: dai id 0x%x not supported\n",
+ __func__, cpu_dai->id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) {
+ if (slot_offset[i] != AFE_SLOT_MAPPING_OFFSET_INVALID)
+ offset_channels++;
+ else
+ break;
+ }
+
+ if (offset_channels == 0) {
+ pr_err("%s: slot offset not supported, offset_channels %d\n",
+ __func__, offset_channels);
+ return -EINVAL;
+ }
+
+ if (channels > offset_channels) {
+ pr_err("%s: channels %d exceed offset_channels %d\n",
+ __func__, channels, offset_channels);
+ return -EINVAL;
+ }
+
+ slot_mask = tdm_param_set_slot_mask(slots);
+ if (!slot_mask) {
+ pr_err("%s: invalid slot_mask 0x%x\n",
+ __func__, slot_mask);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+ slots, slot_width);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai,
+ 0, NULL, channels, slot_offset);
+ if (ret < 0) {
+ pr_err("%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ } else {
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, slot_mask, 0,
+ slots, slot_width);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai,
+ channels, slot_offset, 0, NULL);
+ if (ret < 0) {
+ pr_err("%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ }
+
+ clk_freq = rate * slot_width * slots;
+ ret = snd_soc_dai_set_sysclk(cpu_dai, 0, clk_freq, SND_SOC_CLOCK_OUT);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm clk, err:%d\n",
+ __func__, ret);
+ }
+
+end:
+ return ret;
+}
+
+static struct snd_soc_ops msm8996_tdm_be_ops = {
+ .hw_params = msm8996_tdm_snd_hw_params,
+};
+
+
static const struct soc_enum msm_snd_enum[] = {
SOC_ENUM_SINGLE_EXT(2, spk_function),
SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
@@ -1587,6 +5072,14 @@ static const struct soc_enum msm_snd_enum[] = {
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(slim6_rx_bit_format_text),
slim6_rx_bit_format_text),
SOC_ENUM_SINGLE_EXT(2, slim6_rx_ch_text),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_ch_text), tdm_ch_text),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_bit_format_text),
+ tdm_bit_format_text),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_rate_text), tdm_rate_text),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_slot_num_text),
+ tdm_slot_num_text),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tdm_slot_width_text),
+ tdm_slot_width_text),
};
static const struct snd_kcontrol_new msm_snd_controls[] = {
@@ -1633,6 +5126,462 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
msm8996_hifi_put),
SOC_ENUM_EXT("VI_FEED_TX Channels", msm_snd_enum[12],
msm_vi_feed_tx_ch_get, msm_vi_feed_tx_ch_put),
+ SOC_ENUM_EXT("USB_AUDIO_RX Channels", usb_rx_chs,
+ usb_audio_rx_ch_get, usb_audio_rx_ch_put),
+ SOC_ENUM_EXT("USB_AUDIO_TX Channels", usb_tx_chs,
+ usb_audio_tx_ch_get, usb_audio_tx_ch_put),
+ SOC_ENUM_EXT("USB_AUDIO_RX SampleRate", usb_rx_sample_rate,
+ usb_audio_rx_sample_rate_get,
+ usb_audio_rx_sample_rate_put),
+ SOC_ENUM_EXT("USB_AUDIO_TX SampleRate", usb_tx_sample_rate,
+ usb_audio_tx_sample_rate_get,
+ usb_audio_tx_sample_rate_put),
+ SOC_ENUM_EXT("USB_AUDIO_RX Format", usb_rx_format,
+ usb_audio_rx_format_get, usb_audio_rx_format_put),
+ SOC_ENUM_EXT("USB_AUDIO_TX Format", usb_tx_format,
+ usb_audio_tx_format_get, usb_audio_tx_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_0 Channels", msm_snd_enum[16],
+ msm_pri_tdm_tx_0_ch_get, msm_pri_tdm_tx_0_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_1 Channels", msm_snd_enum[16],
+ msm_pri_tdm_tx_1_ch_get, msm_pri_tdm_tx_1_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_2 Channels", msm_snd_enum[16],
+ msm_pri_tdm_tx_2_ch_get, msm_pri_tdm_tx_2_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_3 Channels", msm_snd_enum[16],
+ msm_pri_tdm_tx_3_ch_get, msm_pri_tdm_tx_3_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_0 Channels", msm_snd_enum[16],
+ msm_pri_tdm_rx_0_ch_get, msm_pri_tdm_rx_0_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_1 Channels", msm_snd_enum[16],
+ msm_pri_tdm_rx_1_ch_get, msm_pri_tdm_rx_1_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_2 Channels", msm_snd_enum[16],
+ msm_pri_tdm_rx_2_ch_get, msm_pri_tdm_rx_2_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_3 Channels", msm_snd_enum[16],
+ msm_pri_tdm_rx_3_ch_get, msm_pri_tdm_rx_3_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Channels", msm_snd_enum[16],
+ msm_sec_tdm_rx_0_ch_get, msm_sec_tdm_rx_0_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_1 Channels", msm_snd_enum[16],
+ msm_sec_tdm_rx_1_ch_get, msm_sec_tdm_rx_1_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_2 Channels", msm_snd_enum[16],
+ msm_sec_tdm_rx_2_ch_get, msm_sec_tdm_rx_2_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_3 Channels", msm_snd_enum[16],
+ msm_sec_tdm_rx_3_ch_get, msm_sec_tdm_rx_3_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_0 Channels", msm_snd_enum[16],
+ msm_sec_tdm_tx_0_ch_get, msm_sec_tdm_tx_0_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_1 Channels", msm_snd_enum[16],
+ msm_sec_tdm_tx_1_ch_get, msm_sec_tdm_tx_1_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_2 Channels", msm_snd_enum[16],
+ msm_sec_tdm_tx_2_ch_get, msm_sec_tdm_tx_2_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_3 Channels", msm_snd_enum[16],
+ msm_sec_tdm_tx_3_ch_get, msm_sec_tdm_tx_3_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_0 Channels", msm_snd_enum[16],
+ msm_tert_tdm_rx_0_ch_get, msm_tert_tdm_rx_0_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_1 Channels", msm_snd_enum[16],
+ msm_tert_tdm_rx_1_ch_get, msm_tert_tdm_rx_1_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_2 Channels", msm_snd_enum[16],
+ msm_tert_tdm_rx_2_ch_get, msm_tert_tdm_rx_2_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_3 Channels", msm_snd_enum[16],
+ msm_tert_tdm_rx_3_ch_get, msm_tert_tdm_rx_3_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_0 Channels", msm_snd_enum[16],
+ msm_tert_tdm_tx_0_ch_get, msm_tert_tdm_tx_0_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_1 Channels", msm_snd_enum[16],
+ msm_tert_tdm_tx_1_ch_get, msm_tert_tdm_tx_1_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_2 Channels", msm_snd_enum[16],
+ msm_tert_tdm_tx_2_ch_get, msm_tert_tdm_tx_2_ch_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_3 Channels", msm_snd_enum[16],
+ msm_tert_tdm_tx_3_ch_get, msm_tert_tdm_tx_3_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_0 Channels", msm_snd_enum[16],
+ msm_quat_tdm_rx_0_ch_get, msm_quat_tdm_rx_0_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_1 Channels", msm_snd_enum[16],
+ msm_quat_tdm_rx_1_ch_get, msm_quat_tdm_rx_1_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_2 Channels", msm_snd_enum[16],
+ msm_quat_tdm_rx_2_ch_get, msm_quat_tdm_rx_2_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_3 Channels", msm_snd_enum[16],
+ msm_quat_tdm_rx_3_ch_get, msm_quat_tdm_rx_3_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_0 Channels", msm_snd_enum[16],
+ msm_quat_tdm_tx_0_ch_get, msm_quat_tdm_tx_0_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_1 Channels", msm_snd_enum[16],
+ msm_quat_tdm_tx_1_ch_get, msm_quat_tdm_tx_1_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_2 Channels", msm_snd_enum[16],
+ msm_quat_tdm_tx_2_ch_get, msm_quat_tdm_tx_2_ch_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_3 Channels", msm_snd_enum[16],
+ msm_quat_tdm_tx_3_ch_get, msm_quat_tdm_tx_3_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_0 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_tx_0_bit_format_get,
+ msm_pri_tdm_tx_0_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_1 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_tx_1_bit_format_get,
+ msm_pri_tdm_tx_1_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_2 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_tx_2_bit_format_get,
+ msm_pri_tdm_tx_2_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_3 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_tx_3_bit_format_get,
+ msm_pri_tdm_tx_3_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_0 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_rx_0_bit_format_get,
+ msm_pri_tdm_rx_0_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_1 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_rx_1_bit_format_get,
+ msm_pri_tdm_rx_1_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_2 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_rx_2_bit_format_get,
+ msm_pri_tdm_rx_2_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_3 Bit Format", msm_snd_enum[17],
+ msm_pri_tdm_rx_3_bit_format_get,
+ msm_pri_tdm_rx_3_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_rx_0_bit_format_get,
+ msm_sec_tdm_rx_0_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_1 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_rx_1_bit_format_get,
+ msm_sec_tdm_rx_1_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_2 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_rx_2_bit_format_get,
+ msm_sec_tdm_rx_2_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_3 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_rx_3_bit_format_get,
+ msm_sec_tdm_rx_3_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_0 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_tx_0_bit_format_get,
+ msm_sec_tdm_tx_0_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_1 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_tx_1_bit_format_get,
+ msm_sec_tdm_tx_1_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_2 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_tx_2_bit_format_get,
+ msm_sec_tdm_tx_2_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_TX_3 Bit Format", msm_snd_enum[17],
+ msm_sec_tdm_tx_3_bit_format_get,
+ msm_sec_tdm_tx_3_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_0 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_rx_0_bit_format_get,
+ msm_tert_tdm_rx_0_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_1 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_rx_1_bit_format_get,
+ msm_tert_tdm_rx_1_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_2 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_rx_2_bit_format_get,
+ msm_tert_tdm_rx_2_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_RX_3 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_rx_3_bit_format_get,
+ msm_tert_tdm_rx_3_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_0 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_tx_0_bit_format_get,
+ msm_tert_tdm_tx_0_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_1 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_tx_1_bit_format_get,
+ msm_tert_tdm_tx_1_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_2 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_tx_2_bit_format_get,
+ msm_tert_tdm_tx_2_bit_format_put),
+ SOC_ENUM_EXT("TERT_TDM_TX_3 Bit Format", msm_snd_enum[17],
+ msm_tert_tdm_tx_3_bit_format_get,
+ msm_tert_tdm_tx_3_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_0 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_rx_0_bit_format_get,
+ msm_quat_tdm_rx_0_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_1 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_rx_1_bit_format_get,
+ msm_quat_tdm_rx_1_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_2 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_rx_2_bit_format_get,
+ msm_quat_tdm_rx_2_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_RX_3 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_rx_3_bit_format_get,
+ msm_quat_tdm_rx_3_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_0 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_tx_0_bit_format_get,
+ msm_quat_tdm_tx_0_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_1 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_tx_1_bit_format_get,
+ msm_quat_tdm_tx_1_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_2 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_tx_2_bit_format_get,
+ msm_quat_tdm_tx_2_bit_format_put),
+ SOC_ENUM_EXT("QUAT_TDM_TX_3 Bit Format", msm_snd_enum[17],
+ msm_quat_tdm_tx_3_bit_format_get,
+ msm_quat_tdm_tx_3_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM SampleRate", msm_snd_enum[18],
+ msm_pri_tdm_rate_get, msm_pri_tdm_rate_put),
+ SOC_ENUM_EXT("PRI_TDM Slot Number", msm_snd_enum[19],
+ msm_pri_tdm_slot_num_get, msm_pri_tdm_slot_num_put),
+ SOC_ENUM_EXT("PRI_TDM Slot Width", msm_snd_enum[20],
+ msm_pri_tdm_slot_width_get, msm_pri_tdm_slot_width_put),
+ SOC_ENUM_EXT("SEC_TDM SampleRate", msm_snd_enum[18],
+ msm_sec_tdm_rate_get, msm_sec_tdm_rate_put),
+ SOC_ENUM_EXT("SEC_TDM Slot Number", msm_snd_enum[19],
+ msm_sec_tdm_slot_num_get, msm_sec_tdm_slot_num_put),
+ SOC_ENUM_EXT("SEC_TDM Slot Width", msm_snd_enum[20],
+ msm_sec_tdm_slot_width_get, msm_sec_tdm_slot_width_put),
+ SOC_ENUM_EXT("TERT_TDM SampleRate", msm_snd_enum[18],
+ msm_tert_tdm_rate_get, msm_tert_tdm_rate_put),
+ SOC_ENUM_EXT("TERT_TDM Slot Number", msm_snd_enum[19],
+ msm_tert_tdm_slot_num_get, msm_tert_tdm_slot_num_put),
+ SOC_ENUM_EXT("TERT_TDM Slot Width", msm_snd_enum[20],
+ msm_tert_tdm_slot_width_get,
+ msm_tert_tdm_slot_width_put),
+ SOC_ENUM_EXT("QUAT_TDM SampleRate", msm_snd_enum[18],
+ msm_quat_tdm_rate_get, msm_quat_tdm_rate_put),
+ SOC_ENUM_EXT("QUAT_TDM Slot Number", msm_snd_enum[19],
+ msm_quat_tdm_slot_num_get, msm_quat_tdm_slot_num_put),
+ SOC_ENUM_EXT("QUAT_TDM Slot Width", msm_snd_enum[20],
+ msm_quat_tdm_slot_width_get,
+ msm_quat_tdm_slot_width_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
};
static bool msm8996_swap_gnd_mic(struct snd_soc_codec *codec)
@@ -2016,10 +5965,8 @@ static void *def_tasha_mbhc_cal(void)
tasha_wcd_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
WCD9XXX_MBHC_DEF_RLOADS), GFP_KERNEL);
- if (!tasha_wcd_cal) {
- pr_err("%s: out of memory\n", __func__);
+ if (!tasha_wcd_cal)
return NULL;
- }
#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tasha_wcd_cal)->X) = (Y))
S(v_hs_max, 1500);
@@ -2060,7 +6007,7 @@ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = snd_soc_dai_get_channel_map(codec_dai,
- &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+ &tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
if (ret < 0) {
pr_err("%s: failed to get codec chan map, err:%d\n",
__func__, ret);
@@ -2079,7 +6026,7 @@ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
msm_slim_0_rx_ch);
rx_ch_count = msm_slim_0_rx_ch;
}
- ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
+ ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL,
rx_ch_count, rx_ch);
if (ret < 0) {
pr_err("%s: failed to set cpu chan map, err:%d\n",
@@ -2091,7 +6038,7 @@ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
pr_debug("%s: %s_tx_dai_id_%d_ch=%d\n", __func__,
codec_dai->name, codec_dai->id, user_set_tx_ch);
ret = snd_soc_dai_get_channel_map(codec_dai,
- &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+ &tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
if (ret < 0) {
pr_err("%s: failed to get codec chan map\n, err:%d\n",
__func__, ret);
@@ -2119,7 +6066,8 @@ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
tx_ch_cnt, dai_link->be_id);
ret = snd_soc_dai_set_channel_map(cpu_dai,
- user_set_tx_ch, tx_ch, 0 , 0);
+ user_set_tx_ch,
+ tx_ch, 0, NULL);
if (ret < 0) {
pr_err("%s: failed to set cpu chan map, err:%d\n",
__func__, ret);
@@ -2153,7 +6101,7 @@ static int msm_snd_cpe_hw_params(struct snd_pcm_substream *substream,
pr_debug("%s: %s_tx_dai_id_%d\n", __func__,
codec_dai->name, codec_dai->id);
ret = snd_soc_dai_get_channel_map(codec_dai,
- &tx_ch_cnt, tx_ch, NULL , NULL);
+ &tx_ch_cnt, tx_ch, NULL, NULL);
if (ret < 0) {
pr_err("%s: failed to get codec chan map\n, err:%d\n",
__func__, ret);
@@ -2166,7 +6114,7 @@ static int msm_snd_cpe_hw_params(struct snd_pcm_substream *substream,
__func__, tx_ch_cnt, dai_link->be_id);
ret = snd_soc_dai_set_channel_map(cpu_dai,
- user_set_tx_ch, tx_ch, 0 , 0);
+ user_set_tx_ch, tx_ch, 0, NULL);
if (ret < 0)
pr_err("%s: failed to set cpu chan map, err:%d\n",
__func__, ret);
@@ -2199,13 +6147,13 @@ static int msm8996_slimbus_2_hw_params(struct snd_pcm_substream *substream,
pr_debug("%s: %s rx_dai_id = %d num_ch = %d\n", __func__,
codec_dai->name, codec_dai->id, num_rx_ch);
ret = snd_soc_dai_get_channel_map(codec_dai,
- &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+ &tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
if (ret < 0) {
pr_err("%s: failed to get codec chan map, err:%d\n",
__func__, ret);
goto end;
}
- ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
+ ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL,
num_rx_ch, rx_ch);
if (ret < 0) {
pr_err("%s: failed to set cpu chan map, err:%d\n",
@@ -2217,14 +6165,14 @@ static int msm8996_slimbus_2_hw_params(struct snd_pcm_substream *substream,
pr_debug("%s: %s tx_dai_id = %d num_ch = %d\n", __func__,
codec_dai->name, codec_dai->id, num_tx_ch);
ret = snd_soc_dai_get_channel_map(codec_dai,
- &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+ &tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
if (ret < 0) {
pr_err("%s: failed to get codec chan map, err:%d\n",
__func__, ret);
goto end;
}
ret = snd_soc_dai_set_channel_map(cpu_dai,
- num_tx_ch, tx_ch, 0 , 0);
+ num_tx_ch, tx_ch, 0, NULL);
if (ret < 0) {
pr_err("%s: failed to set cpu chan map, err:%d\n",
__func__, ret);
@@ -2963,7 +6911,489 @@ static struct snd_soc_dai_link msm8996_common_dai_links[] = {
.be_id = MSM_FRONTEND_DAI_VOICE2,
},
};
+static struct snd_soc_dai_link msm8996_tdm_fe_dai_links[] = {
+ {
+ .name = "Primary TDM RX 0 Hostless",
+ .stream_name = "Primary TDM RX 0 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM RX 1 Hostless",
+ .stream_name = "Primary TDM RX 1 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM RX 2 Hostless",
+ .stream_name = "Primary TDM RX 2 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM RX 3 Hostless",
+ .stream_name = "Primary TDM RX 3 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 0 Hostless",
+ .stream_name = "Primary TDM TX 0 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 1 Hostless",
+ .stream_name = "Primary TDM TX 1 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 2 Hostless",
+ .stream_name = "Primary TDM TX 2 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 3 Hostless",
+ .stream_name = "Primary TDM TX 3 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM RX 0 Hostless",
+ .stream_name = "Secondary TDM RX 0 Hostless",
+ .cpu_dai_name = "SEC_TDM_RX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM RX 1 Hostless",
+ .stream_name = "Secondary TDM RX 1 Hostless",
+ .cpu_dai_name = "SEC_TDM_RX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM RX 2 Hostless",
+ .stream_name = "Secondary TDM RX 2 Hostless",
+ .cpu_dai_name = "SEC_TDM_RX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM RX 3 Hostless",
+ .stream_name = "Secondary TDM RX 3 Hostless",
+ .cpu_dai_name = "SEC_TDM_RX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM TX 0 Hostless",
+ .stream_name = "Secondary TDM TX 0 Hostless",
+ .cpu_dai_name = "SEC_TDM_TX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM TX 1 Hostless",
+ .stream_name = "Secondary TDM TX 1 Hostless",
+ .cpu_dai_name = "SEC_TDM_TX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM TX 2 Hostless",
+ .stream_name = "Secondary TDM TX 2 Hostless",
+ .cpu_dai_name = "SEC_TDM_TX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Secondary TDM TX 3 Hostless",
+ .stream_name = "Secondary TDM TX 3 Hostless",
+ .cpu_dai_name = "SEC_TDM_TX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM RX 0 Hostless",
+ .stream_name = "Tertiary TDM RX 0 Hostless",
+ .cpu_dai_name = "TERT_TDM_RX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM RX 1 Hostless",
+ .stream_name = "Tertiary TDM RX 1 Hostless",
+ .cpu_dai_name = "TERT_TDM_RX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM RX 2 Hostless",
+ .stream_name = "Tertiary TDM RX 2 Hostless",
+ .cpu_dai_name = "TERT_TDM_RX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM RX 3 Hostless",
+ .stream_name = "Tertiary TDM RX 3 Hostless",
+ .cpu_dai_name = "TERT_TDM_RX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM TX 0 Hostless",
+ .stream_name = "Tertiary TDM TX 0 Hostless",
+ .cpu_dai_name = "TERT_TDM_TX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM TX 1 Hostless",
+ .stream_name = "Tertiary TDM TX 1 Hostless",
+ .cpu_dai_name = "TERT_TDM_TX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM TX 2 Hostless",
+ .stream_name = "Tertiary TDM TX 2 Hostless",
+ .cpu_dai_name = "TERT_TDM_TX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Tertiary TDM TX 3 Hostless",
+ .stream_name = "Tertiary TDM TX 3 Hostless",
+ .cpu_dai_name = "TERT_TDM_TX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM RX 0 Hostless",
+ .stream_name = "Quaternary TDM RX 0 Hostless",
+ .cpu_dai_name = "QUAT_TDM_RX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM RX 1 Hostless",
+ .stream_name = "Quaternary TDM RX 1 Hostless",
+ .cpu_dai_name = "QUAT_TDM_RX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM RX 2 Hostless",
+ .stream_name = "Quaternary TDM RX 2 Hostless",
+ .cpu_dai_name = "QUAT_TDM_RX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM RX 3 Hostless",
+ .stream_name = "Quaternary TDM RX 3 Hostless",
+ .cpu_dai_name = "QUAT_TDM_RX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM TX 0 Hostless",
+ .stream_name = "Quaternary TDM TX 0 Hostless",
+ .cpu_dai_name = "QUAT_TDM_TX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM TX 1 Hostless",
+ .stream_name = "Quaternary TDM TX 1 Hostless",
+ .cpu_dai_name = "QUAT_TDM_TX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM TX 2 Hostless",
+ .stream_name = "Quaternary TDM TX 2 Hostless",
+ .cpu_dai_name = "QUAT_TDM_TX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Quaternary TDM TX 3 Hostless",
+ .stream_name = "Quaternary TDM TX 3 Hostless",
+ .cpu_dai_name = "QUAT_TDM_TX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+};
static struct snd_soc_dai_link msm8996_tasha_fe_dai_links[] = {
{
.name = LPASS_BE_SLIMBUS_4_TX,
@@ -3178,6 +7608,33 @@ static struct snd_soc_dai_link msm8996_common_be_dai_links[] = {
.be_hw_params_fixup = msm_tx_be_hw_params_fixup,
.ops = &msm8996_mi2s_be_ops,
.ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_USB_AUDIO_RX,
+ .stream_name = "USB Audio Playback",
+ .cpu_dai_name = "msm-dai-q6-dev.28672",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_USB_RX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_USB_AUDIO_TX,
+ .stream_name = "USB Audio Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.28673",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_USB_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ignore_suspend = 1,
}
};
@@ -3336,7 +7793,458 @@ static struct snd_soc_dai_link msm8996_tasha_be_dai_links[] = {
/* dai link has playback support */
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
+ }
+};
+static struct snd_soc_dai_link msm8996_tdm_be_dai_links[] = {
+ {
+ .name = LPASS_BE_SEC_TDM_RX_0,
+ .stream_name = "Secondary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36880",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_1,
+ .stream_name = "Secondary TDM1 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36882",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_2,
+ .stream_name = "Secondary TDM2 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36884",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_3,
+ .stream_name = "Secondary TDM3 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36886",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_TX_0,
+ .stream_name = "Secondary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36881",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_TX_1,
+ .stream_name = "Secondary TDM1 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36883",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_TX_2,
+ .stream_name = "Secondary TDM2 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36885",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_TX_3,
+ .stream_name = "Secondary TDM3 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36887",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_RX_0,
+ .stream_name = "Tertiary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36896",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_RX_1,
+ .stream_name = "Tertiary TDM1 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36898",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_RX_2,
+ .stream_name = "Tertiary TDM2 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36900",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_RX_3,
+ .stream_name = "Tertiary TDM3 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36902",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_TX_0,
+ .stream_name = "Tertiary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36897",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_TX_1,
+ .stream_name = "Tertiary TDM1 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36899",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_TX_2,
+ .stream_name = "Tertiary TDM2 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36901",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_TERT_TDM_TX_3,
+ .stream_name = "Tertiary TDM3 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36903",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_RX_0,
+ .stream_name = "Quaternary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36912",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_RX_1,
+ .stream_name = "Quaternary TDM1 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36914",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_RX_2,
+ .stream_name = "Quaternary TDM2 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36916",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_RX_3,
+ .stream_name = "Quaternary TDM3 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36918",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
},
+ {
+ .name = LPASS_BE_QUAT_TDM_TX_0,
+ .stream_name = "Quaternary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36913",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_TX_1,
+ .stream_name = "Quaternary TDM1 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36915",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_TX_2,
+ .stream_name = "Quaternary TDM2 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36917",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_QUAT_TDM_TX_3,
+ .stream_name = "Quaternary TDM3 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36919",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_RX_0,
+ .stream_name = "Primary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36864",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_RX_1,
+ .stream_name = "Primary TDM1 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36866",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_RX_2,
+ .stream_name = "Primary TDM2 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36868",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_RX_3,
+ .stream_name = "Primary TDM3 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36870",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_0,
+ .stream_name = "Primary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36865",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_1,
+ .stream_name = "Primary TDM1 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36867",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_2,
+ .stream_name = "Primary TDM2 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36869",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_3,
+ .stream_name = "Primary TDM3 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36871",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &msm8996_tdm_be_ops,
+ .ignore_suspend = 1,
+ }
+
};
static struct snd_soc_dai_link msm8996_hdmi_dai_link[] = {
@@ -3360,8 +8268,10 @@ static struct snd_soc_dai_link msm8996_hdmi_dai_link[] = {
static struct snd_soc_dai_link msm8996_tasha_dai_links[
ARRAY_SIZE(msm8996_common_dai_links) +
ARRAY_SIZE(msm8996_tasha_fe_dai_links) +
+ ARRAY_SIZE(msm8996_tdm_fe_dai_links) +
ARRAY_SIZE(msm8996_common_be_dai_links) +
ARRAY_SIZE(msm8996_tasha_be_dai_links) +
+ ARRAY_SIZE(msm8996_tdm_be_dai_links) +
ARRAY_SIZE(msm8996_hdmi_dai_link)];
static int msm8996_wsa881x_init(struct snd_soc_component *component)
@@ -3564,8 +8474,8 @@ static const struct of_device_id msm8996_asoc_machine_of_match[] = {
static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
{
struct snd_soc_card *card = NULL;
- struct snd_soc_dai_link *dailink;
- int len_1, len_2, len_3, len_4;
+ struct snd_soc_dai_link *dailink = NULL;
+ int len_1 = 0, len_2 = 0, len_3 = 0, len_4 = 0, len_5 = 0;
const struct of_device_id *match;
match = of_match_node(msm8996_asoc_machine_of_match, dev->of_node);
@@ -3579,7 +8489,8 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
card = &snd_soc_card_tasha_msm8996;
len_1 = ARRAY_SIZE(msm8996_common_dai_links);
len_2 = len_1 + ARRAY_SIZE(msm8996_tasha_fe_dai_links);
- len_3 = len_2 + ARRAY_SIZE(msm8996_common_be_dai_links);
+ len_3 = len_2 + ARRAY_SIZE(msm8996_tdm_fe_dai_links);
+ len_4 = len_3 + ARRAY_SIZE(msm8996_common_be_dai_links);
memcpy(msm8996_tasha_dai_links,
msm8996_common_dai_links,
@@ -3588,29 +8499,40 @@ static struct snd_soc_card *populate_snd_card_dailinks(struct device *dev)
msm8996_tasha_fe_dai_links,
sizeof(msm8996_tasha_fe_dai_links));
memcpy(msm8996_tasha_dai_links + len_2,
+ msm8996_tdm_fe_dai_links,
+ sizeof(msm8996_tdm_fe_dai_links));
+ memcpy(msm8996_tasha_dai_links + len_3,
msm8996_common_be_dai_links,
sizeof(msm8996_common_be_dai_links));
- memcpy(msm8996_tasha_dai_links + len_3,
+ memcpy(msm8996_tasha_dai_links + len_4,
msm8996_tasha_be_dai_links,
sizeof(msm8996_tasha_be_dai_links));
dailink = msm8996_tasha_dai_links;
- len_4 = len_3 + ARRAY_SIZE(msm8996_tasha_be_dai_links);
+ len_5 = len_4 + ARRAY_SIZE(msm8996_tasha_be_dai_links);
}
if (of_property_read_bool(dev->of_node, "qcom,hdmi-audio-rx")) {
dev_dbg(dev, "%s(): hdmi audio support present\n",
__func__);
- memcpy(dailink + len_4, msm8996_hdmi_dai_link,
+ memcpy(dailink + len_5, msm8996_hdmi_dai_link,
sizeof(msm8996_hdmi_dai_link));
- len_4 += ARRAY_SIZE(msm8996_hdmi_dai_link);
+ len_5 += ARRAY_SIZE(msm8996_hdmi_dai_link);
} else {
dev_dbg(dev, "%s(): No hdmi audio support\n", __func__);
}
-
+ if (of_property_read_bool(dev->of_node, "qcom,tdm-audio-intf")) {
+ dev_dbg(dev, "%s(): TDM support present\n",
+ __func__);
+ memcpy(dailink + len_5, msm8996_tdm_be_dai_links,
+ sizeof(msm8996_tdm_be_dai_links));
+ len_5 += ARRAY_SIZE(msm8996_tdm_be_dai_links);
+ } else {
+ dev_dbg(dev, "%s(): No TDM support\n", __func__);
+ }
if (card) {
card->dai_link = dailink;
- card->num_links = len_4;
+ card->num_links = len_5;
}
return card;
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 4116f79890a3..67d3d277404d 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -19,6 +19,6 @@ obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
obj-$(CONFIG_QTI_PP) += msm-qti-pp-config.o
obj-y += audio_calibration.o audio_cal_utils.o q6adm.o q6afe.o q6asm.o \
q6audio-v2.o q6voice.o q6core.o rtac.o q6lsm.o \
- msm-pcm-q6-noirq.o
+ msm-pcm-q6-noirq.o q6common.o
ocmem-audio-objs += audio_ocmem.o
obj-$(CONFIG_AUDIO_OCMEM) += ocmem-audio.o
diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
index 1286d3185780..37c43253a5bd 100644
--- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
@@ -16,6 +16,7 @@
#include <sound/compress_params.h>
#include <sound/msm-audio-effects-q6-v2.h>
#include <sound/devdep_params.h>
+#include <sound/q6common.h>
#define MAX_ENABLE_CMD_SIZE 32
@@ -61,44 +62,35 @@ int msm_audio_effects_enable_extn(struct audio_client *ac,
struct msm_nt_eff_all_config *effects,
bool flag)
{
- uint32_t updt_params[MAX_ENABLE_CMD_SIZE] = {0};
- uint32_t params_length;
+ u32 flag_param = flag ? 1 : 0;
+ struct param_hdr_v3 param_hdr = {0};
int rc = 0;
pr_debug("%s\n", __func__);
- if (!ac) {
- pr_err("%s: cannot set audio effects\n", __func__);
- return -EINVAL;
- }
- params_length = 0;
- updt_params[0] = AUDPROC_MODULE_ID_VIRTUALIZER;
- updt_params[1] = AUDPROC_PARAM_ID_ENABLE;
- updt_params[2] = VIRTUALIZER_ENABLE_PARAM_SZ;
- updt_params[3] = flag;
- params_length += COMMAND_PAYLOAD_SZ + VIRTUALIZER_ENABLE_PARAM_SZ;
+ param_hdr.module_id = AUDPROC_MODULE_ID_VIRTUALIZER;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE;
+ param_hdr.param_size = VIRTUALIZER_ENABLE_PARAM_SZ;
if (effects->virtualizer.enable_flag)
- q6asm_send_audio_effects_params(ac, (char *)&updt_params[0],
- params_length);
- memset(updt_params, 0, MAX_ENABLE_CMD_SIZE);
- params_length = 0;
- updt_params[0] = AUDPROC_MODULE_ID_BASS_BOOST;
- updt_params[1] = AUDPROC_PARAM_ID_ENABLE;
- updt_params[2] = BASS_BOOST_ENABLE_PARAM_SZ;
- updt_params[3] = flag;
- params_length += COMMAND_PAYLOAD_SZ + BASS_BOOST_ENABLE_PARAM_SZ;
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr,
+ (u8 *) &flag_param);
+
+ param_hdr.module_id = AUDPROC_MODULE_ID_BASS_BOOST;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE;
+ param_hdr.param_size = BASS_BOOST_ENABLE_PARAM_SZ;
if (effects->bass_boost.enable_flag)
- q6asm_send_audio_effects_params(ac, (char *)&updt_params[0],
- params_length);
- memset(updt_params, 0, MAX_ENABLE_CMD_SIZE);
- params_length = 0;
- updt_params[0] = AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
- updt_params[1] = AUDPROC_PARAM_ID_ENABLE;
- updt_params[2] = EQ_ENABLE_PARAM_SZ;
- updt_params[3] = flag;
- params_length += COMMAND_PAYLOAD_SZ + EQ_ENABLE_PARAM_SZ;
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr,
+ (u8 *) &flag_param);
+
+ param_hdr.module_id = AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE;
+ param_hdr.param_size = EQ_ENABLE_PARAM_SZ;
if (effects->equalizer.enable_flag)
- q6asm_send_audio_effects_params(ac, (char *)&updt_params[0],
- params_length);
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr,
+ (u8 *) &flag_param);
+
return rc;
}
@@ -108,25 +100,32 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
{
long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
char *params = NULL;
+ u8 *updt_params;
int rc = 0;
int devices = GET_NEXT(values, param_max_offset, rc);
int num_commands = GET_NEXT(values, param_max_offset, rc);
- int *updt_params, i, prev_enable_flag;
- uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int i, prev_enable_flag;
+ uint32_t max_params_length = 0;
+ uint32_t params_length = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ u8 *param_data = NULL;
+ u32 packed_data_size = 0;
pr_debug("%s\n", __func__);
if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
pr_err("%s: cannot set audio effects\n", __func__);
return -EINVAL;
}
- params = kzalloc(params_length, GFP_KERNEL);
+ params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL);
if (!params) {
pr_err("%s, params memory alloc failed\n", __func__);
return -ENOMEM;
}
pr_debug("%s: device: %d\n", __func__, devices);
- updt_params = (int *)params;
- params_length = 0;
+ updt_params = (u8 *) params;
+ /* Set MID and IID once at top and only update param specific fields*/
+ param_hdr.module_id = AUDPROC_MODULE_ID_VIRTUALIZER;
+ param_hdr.instance_id = INSTANCE_ID_0;
for (i = 0; i < num_commands; i++) {
uint32_t command_id =
GET_NEXT(values, param_max_offset, rc);
@@ -148,23 +147,19 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s:VIRT ENABLE prev:%d, new:%d\n", __func__,
prev_enable_flag, virtualizer->enable_flag);
- if (prev_enable_flag != virtualizer->enable_flag) {
- params_length += COMMAND_PAYLOAD_SZ +
- VIRTUALIZER_ENABLE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "VIRT ENABLE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_VIRTUALIZER;
- *updt_params++ =
+ if (prev_enable_flag == virtualizer->enable_flag)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ VIRTUALIZER_ENABLE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "VIRT ENABLE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
AUDPROC_PARAM_ID_VIRTUALIZER_ENABLE;
- *updt_params++ =
- VIRTUALIZER_ENABLE_PARAM_SZ;
- *updt_params++ =
- virtualizer->enable_flag;
- }
+ param_hdr.param_size = VIRTUALIZER_ENABLE_PARAM_SZ;
+ param_data = (u8 *) &virtualizer->enable_flag;
break;
case VIRTUALIZER_STRENGTH:
if (length != 1 || index_offset != 0) {
@@ -176,23 +171,19 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: VIRT STRENGTH val: %d\n",
__func__, virtualizer->strength);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- VIRTUALIZER_STRENGTH_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "VIRT STRENGTH", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_VIRTUALIZER;
- *updt_params++ =
- AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH;
- *updt_params++ =
- VIRTUALIZER_STRENGTH_PARAM_SZ;
- *updt_params++ =
- virtualizer->strength;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ VIRTUALIZER_STRENGTH_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "VIRT STRENGTH", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
+ AUDPROC_PARAM_ID_VIRTUALIZER_STRENGTH;
+ param_hdr.param_size = VIRTUALIZER_STRENGTH_PARAM_SZ;
+ param_data = (u8 *) &virtualizer->strength;
break;
case VIRTUALIZER_OUT_TYPE:
if (length != 1 || index_offset != 0) {
@@ -204,23 +195,19 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: VIRT OUT_TYPE val:%d\n",
__func__, virtualizer->out_type);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- VIRTUALIZER_OUT_TYPE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "VIRT OUT_TYPE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_VIRTUALIZER;
- *updt_params++ =
- AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE;
- *updt_params++ =
- VIRTUALIZER_OUT_TYPE_PARAM_SZ;
- *updt_params++ =
- virtualizer->out_type;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ VIRTUALIZER_OUT_TYPE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "VIRT OUT_TYPE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
+ AUDPROC_PARAM_ID_VIRTUALIZER_OUT_TYPE;
+ param_hdr.param_size = VIRTUALIZER_OUT_TYPE_PARAM_SZ;
+ param_data = (u8 *) &virtualizer->out_type;
break;
case VIRTUALIZER_GAIN_ADJUST:
if (length != 1 || index_offset != 0) {
@@ -232,32 +219,40 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: VIRT GAIN_ADJUST val:%d\n",
__func__, virtualizer->gain_adjust);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "VIRT GAIN_ADJUST", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_VIRTUALIZER;
- *updt_params++ =
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "VIRT GAIN_ADJUST", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
AUDPROC_PARAM_ID_VIRTUALIZER_GAIN_ADJUST;
- *updt_params++ =
- VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
- *updt_params++ =
- virtualizer->gain_adjust;
- }
+ param_hdr.param_size = VIRTUALIZER_GAIN_ADJUST_PARAM_SZ;
+ param_data = (u8 *) &virtualizer->gain_adjust;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
- break;
+ continue;
+ }
+ if (rc)
+ goto invalid_config;
+
+ rc = q6common_pack_pp_params(updt_params, &param_hdr,
+ param_data, &packed_data_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, rc);
+ goto invalid_config;
}
+
+ updt_params += packed_data_size;
+ params_length += packed_data_size;
}
if (params_length && (rc == 0))
- q6asm_send_audio_effects_params(ac, params,
- params_length);
+ q6asm_set_pp_params(ac, NULL, params, params_length);
else
pr_debug("%s: did not send pp params\n", __func__);
invalid_config:
@@ -271,25 +266,32 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
{
long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
char *params = NULL;
+ u8 *updt_params;
int rc = 0;
int devices = GET_NEXT(values, param_max_offset, rc);
int num_commands = GET_NEXT(values, param_max_offset, rc);
- int *updt_params, i, prev_enable_flag;
- uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int i, prev_enable_flag;
+ uint32_t max_params_length = 0;
+ uint32_t params_length = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ u8 *param_data = NULL;
+ u32 packed_data_size = 0;
pr_debug("%s\n", __func__);
if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
pr_err("%s: cannot set audio effects\n", __func__);
return -EINVAL;
}
- params = kzalloc(params_length, GFP_KERNEL);
+ params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL);
if (!params) {
pr_err("%s, params memory alloc failed\n", __func__);
return -ENOMEM;
}
pr_debug("%s: device: %d\n", __func__, devices);
- updt_params = (int *)params;
- params_length = 0;
+ updt_params = (u8 *) params;
+ /* Set MID and IID once at top and only update param specific fields*/
+ param_hdr.module_id = AUDPROC_MODULE_ID_REVERB;
+ param_hdr.instance_id = INSTANCE_ID_0;
for (i = 0; i < num_commands; i++) {
uint32_t command_id =
GET_NEXT(values, param_max_offset, rc);
@@ -311,23 +313,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s:REVERB_ENABLE prev:%d,new:%d\n", __func__,
prev_enable_flag, reverb->enable_flag);
- if (prev_enable_flag != reverb->enable_flag) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_ENABLE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_ENABLE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_ENABLE;
- *updt_params++ =
- REVERB_ENABLE_PARAM_SZ;
- *updt_params++ =
- reverb->enable_flag;
- }
+ if (prev_enable_flag == reverb->enable_flag)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_ENABLE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_ENABLE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_ENABLE;
+ param_hdr.param_size = REVERB_ENABLE_PARAM_SZ;
+ param_data = (u8 *) &reverb->enable_flag;
break;
case REVERB_MODE:
if (length != 1 || index_offset != 0) {
@@ -339,23 +336,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_MODE val:%d\n",
__func__, reverb->mode);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_MODE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_MODE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_MODE;
- *updt_params++ =
- REVERB_MODE_PARAM_SZ;
- *updt_params++ =
- reverb->mode;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_MODE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_MODE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_MODE;
+ param_hdr.param_size = REVERB_MODE_PARAM_SZ;
+ param_data = (u8 *) &reverb->mode;
break;
case REVERB_PRESET:
if (length != 1 || index_offset != 0) {
@@ -367,23 +359,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_PRESET val:%d\n",
__func__, reverb->preset);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_PRESET_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_PRESET", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_PRESET;
- *updt_params++ =
- REVERB_PRESET_PARAM_SZ;
- *updt_params++ =
- reverb->preset;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_PRESET_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_PRESET", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_PRESET;
+ param_hdr.param_size = REVERB_PRESET_PARAM_SZ;
+ param_data = (u8 *) &reverb->preset;
break;
case REVERB_WET_MIX:
if (length != 1 || index_offset != 0) {
@@ -395,23 +382,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_WET_MIX val:%d\n",
__func__, reverb->wet_mix);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_WET_MIX_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_WET_MIX", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_WET_MIX;
- *updt_params++ =
- REVERB_WET_MIX_PARAM_SZ;
- *updt_params++ =
- reverb->wet_mix;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_WET_MIX_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_WET_MIX", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_WET_MIX;
+ param_hdr.param_size = REVERB_WET_MIX_PARAM_SZ;
+ param_data = (u8 *) &reverb->wet_mix;
break;
case REVERB_GAIN_ADJUST:
if (length != 1 || index_offset != 0) {
@@ -423,23 +405,19 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_GAIN_ADJUST val:%d\n",
__func__, reverb->gain_adjust);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_GAIN_ADJUST_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_GAIN_ADJUST", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST;
- *updt_params++ =
- REVERB_GAIN_ADJUST_PARAM_SZ;
- *updt_params++ =
- reverb->gain_adjust;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_GAIN_ADJUST_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_GAIN_ADJUST", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
+ AUDPROC_PARAM_ID_REVERB_GAIN_ADJUST;
+ param_hdr.param_size = REVERB_GAIN_ADJUST_PARAM_SZ;
+ param_data = (u8 *) &reverb->gain_adjust;
break;
case REVERB_ROOM_LEVEL:
if (length != 1 || index_offset != 0) {
@@ -451,23 +429,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_ROOM_LEVEL val:%d\n",
__func__, reverb->room_level);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_ROOM_LEVEL_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_ROOM_LEVEL", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL;
- *updt_params++ =
- REVERB_ROOM_LEVEL_PARAM_SZ;
- *updt_params++ =
- reverb->room_level;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_ROOM_LEVEL_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_ROOM_LEVEL", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_ROOM_LEVEL;
+ param_hdr.param_size = REVERB_ROOM_LEVEL_PARAM_SZ;
+ param_data = (u8 *) &reverb->room_level;
break;
case REVERB_ROOM_HF_LEVEL:
if (length != 1 || index_offset != 0) {
@@ -479,23 +452,19 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_ROOM_HF_LEVEL val%d\n",
__func__, reverb->room_hf_level);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_ROOM_HF_LEVEL_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_ROOM_HF_LEVEL", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL;
- *updt_params++ =
- REVERB_ROOM_HF_LEVEL_PARAM_SZ;
- *updt_params++ =
- reverb->room_hf_level;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_ROOM_HF_LEVEL_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_ROOM_HF_LEVEL", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
+ AUDPROC_PARAM_ID_REVERB_ROOM_HF_LEVEL;
+ param_hdr.param_size = REVERB_ROOM_HF_LEVEL_PARAM_SZ;
+ param_data = (u8 *) &reverb->room_hf_level;
break;
case REVERB_DECAY_TIME:
if (length != 1 || index_offset != 0) {
@@ -507,23 +476,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_DECAY_TIME val:%d\n",
__func__, reverb->decay_time);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_DECAY_TIME_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_DECAY_TIME", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_DECAY_TIME;
- *updt_params++ =
- REVERB_DECAY_TIME_PARAM_SZ;
- *updt_params++ =
- reverb->decay_time;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_DECAY_TIME_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_DECAY_TIME", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DECAY_TIME;
+ param_hdr.param_size = REVERB_DECAY_TIME_PARAM_SZ;
+ param_data = (u8 *) &reverb->decay_time;
break;
case REVERB_DECAY_HF_RATIO:
if (length != 1 || index_offset != 0) {
@@ -535,23 +499,19 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_DECAY_HF_RATIO val%d\n",
__func__, reverb->decay_hf_ratio);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_DECAY_HF_RATIO_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_DECAY_HF_RATIO", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO;
- *updt_params++ =
- REVERB_DECAY_HF_RATIO_PARAM_SZ;
- *updt_params++ =
- reverb->decay_hf_ratio;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_DECAY_HF_RATIO_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_DECAY_HF_RATIO", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
+ AUDPROC_PARAM_ID_REVERB_DECAY_HF_RATIO;
+ param_hdr.param_size = REVERB_DECAY_HF_RATIO_PARAM_SZ;
+ param_data = (u8 *) &reverb->decay_hf_ratio;
break;
case REVERB_REFLECTIONS_LEVEL:
if (length != 1 || index_offset != 0) {
@@ -563,23 +523,20 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_REFLECTIONS_LEVEL val:%d\n",
__func__, reverb->reflections_level);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_REFLECTIONS_LEVEL_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_REFLECTIONS_LEVEL", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_REFLECTIONS_LEVEL_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_REFLECTIONS_LEVEL", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
AUDPROC_PARAM_ID_REVERB_REFLECTIONS_LEVEL;
- *updt_params++ =
+ param_hdr.param_size =
REVERB_REFLECTIONS_LEVEL_PARAM_SZ;
- *updt_params++ =
- reverb->reflections_level;
- }
+ param_data = (u8 *) &reverb->reflections_level;
break;
case REVERB_REFLECTIONS_DELAY:
if (length != 1 || index_offset != 0) {
@@ -591,23 +548,20 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_REFLECTIONS_DELAY val:%d\n",
__func__, reverb->reflections_delay);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_REFLECTIONS_DELAY_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_REFLECTIONS_DELAY", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_REFLECTIONS_DELAY_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_REFLECTIONS_DELAY", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
AUDPROC_PARAM_ID_REVERB_REFLECTIONS_DELAY;
- *updt_params++ =
+ param_hdr.param_size =
REVERB_REFLECTIONS_DELAY_PARAM_SZ;
- *updt_params++ =
- reverb->reflections_delay;
- }
+ param_data = (u8 *) &reverb->reflections_delay;
break;
case REVERB_LEVEL:
if (length != 1 || index_offset != 0) {
@@ -619,23 +573,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_LEVEL val:%d\n",
__func__, reverb->level);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_LEVEL_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_LEVEL", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_LEVEL;
- *updt_params++ =
- REVERB_LEVEL_PARAM_SZ;
- *updt_params++ =
- reverb->level;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_LEVEL_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_LEVEL", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_LEVEL;
+ param_hdr.param_size = REVERB_LEVEL_PARAM_SZ;
+ param_data = (u8 *) &reverb->level;
break;
case REVERB_DELAY:
if (length != 1 || index_offset != 0) {
@@ -647,23 +596,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s:REVERB_DELAY val:%d\n",
__func__, reverb->delay);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_DELAY_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_DELAY", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_DELAY;
- *updt_params++ =
- REVERB_DELAY_PARAM_SZ;
- *updt_params++ =
- reverb->delay;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_DELAY_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_DELAY", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DELAY;
+ param_hdr.param_size = REVERB_DELAY_PARAM_SZ;
+ param_data = (u8 *) &reverb->delay;
break;
case REVERB_DIFFUSION:
if (length != 1 || index_offset != 0) {
@@ -675,23 +619,18 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_DIFFUSION val:%d\n",
__func__, reverb->diffusion);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_DIFFUSION_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_DIFFUSION", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_DIFFUSION;
- *updt_params++ =
- REVERB_DIFFUSION_PARAM_SZ;
- *updt_params++ =
- reverb->diffusion;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_DIFFUSION_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_DIFFUSION", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DIFFUSION;
+ param_hdr.param_size = REVERB_DIFFUSION_PARAM_SZ;
+ param_data = (u8 *) &reverb->diffusion;
break;
case REVERB_DENSITY:
if (length != 1 || index_offset != 0) {
@@ -703,32 +642,39 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: REVERB_DENSITY val:%d\n",
__func__, reverb->density);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- REVERB_DENSITY_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "REVERB_DENSITY", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_REVERB;
- *updt_params++ =
- AUDPROC_PARAM_ID_REVERB_DENSITY;
- *updt_params++ =
- REVERB_DENSITY_PARAM_SZ;
- *updt_params++ =
- reverb->density;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ REVERB_DENSITY_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "REVERB_DENSITY", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_REVERB_DENSITY;
+ param_hdr.param_size = REVERB_DENSITY_PARAM_SZ;
+ param_data = (u8 *) &reverb->density;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
- break;
+ continue;
}
+ if (rc)
+ goto invalid_config;
+
+ rc = q6common_pack_pp_params(updt_params, &param_hdr,
+ param_data, &packed_data_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, rc);
+ goto invalid_config;
+ }
+
+ updt_params += packed_data_size;
+ params_length += packed_data_size;
}
if (params_length && (rc == 0))
- q6asm_send_audio_effects_params(ac, params,
- params_length);
+ q6asm_set_pp_params(ac, NULL, params, params_length);
else
pr_debug("%s: did not send pp params\n", __func__);
invalid_config:
@@ -742,25 +688,32 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
{
long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
char *params = NULL;
+ u8 *updt_params;
int rc = 0;
int devices = GET_NEXT(values, param_max_offset, rc);
int num_commands = GET_NEXT(values, param_max_offset, rc);
- int *updt_params, i, prev_enable_flag;
- uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int i, prev_enable_flag;
+ uint32_t max_params_length = 0;
+ uint32_t params_length = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ u8 *param_data = NULL;
+ u32 packed_data_size = 0;
pr_debug("%s\n", __func__);
if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
pr_err("%s: cannot set audio effects\n", __func__);
return -EINVAL;
}
- params = kzalloc(params_length, GFP_KERNEL);
+ params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL);
if (!params) {
pr_err("%s, params memory alloc failed\n", __func__);
return -ENOMEM;
}
pr_debug("%s: device: %d\n", __func__, devices);
- updt_params = (int *)params;
- params_length = 0;
+ updt_params = (u8 *) params;
+ /* Set MID and IID once at top and only update param specific fields*/
+ param_hdr.module_id = AUDPROC_MODULE_ID_BASS_BOOST;
+ param_hdr.instance_id = INSTANCE_ID_0;
for (i = 0; i < num_commands; i++) {
uint32_t command_id =
GET_NEXT(values, param_max_offset, rc);
@@ -783,23 +736,18 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
pr_debug("%s: BASS_BOOST_ENABLE prev:%d new:%d\n",
__func__, prev_enable_flag,
bass_boost->enable_flag);
- if (prev_enable_flag != bass_boost->enable_flag) {
- params_length += COMMAND_PAYLOAD_SZ +
- BASS_BOOST_ENABLE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "BASS_BOOST_ENABLE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_BASS_BOOST;
- *updt_params++ =
- AUDPROC_PARAM_ID_BASS_BOOST_ENABLE;
- *updt_params++ =
- BASS_BOOST_ENABLE_PARAM_SZ;
- *updt_params++ =
- bass_boost->enable_flag;
- }
+ if (prev_enable_flag == bass_boost->enable_flag)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ BASS_BOOST_ENABLE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "BASS_BOOST_ENABLE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_BASS_BOOST_ENABLE;
+ param_hdr.param_size = BASS_BOOST_ENABLE_PARAM_SZ;
+ param_data = (u8 *) &bass_boost->enable_flag;
break;
case BASS_BOOST_MODE:
if (length != 1 || index_offset != 0) {
@@ -811,23 +759,18 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: BASS_BOOST_MODE val:%d\n",
__func__, bass_boost->mode);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- BASS_BOOST_MODE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "BASS_BOOST_MODE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_BASS_BOOST;
- *updt_params++ =
- AUDPROC_PARAM_ID_BASS_BOOST_MODE;
- *updt_params++ =
- BASS_BOOST_MODE_PARAM_SZ;
- *updt_params++ =
- bass_boost->mode;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ BASS_BOOST_MODE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "BASS_BOOST_MODE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_BASS_BOOST_MODE;
+ param_hdr.param_size = BASS_BOOST_MODE_PARAM_SZ;
+ param_data = (u8 *) &bass_boost->mode;
break;
case BASS_BOOST_STRENGTH:
if (length != 1 || index_offset != 0) {
@@ -839,32 +782,40 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: BASS_BOOST_STRENGTH val:%d\n",
__func__, bass_boost->strength);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- BASS_BOOST_STRENGTH_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "BASS_BOOST_STRENGTH", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_BASS_BOOST;
- *updt_params++ =
- AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH;
- *updt_params++ =
- BASS_BOOST_STRENGTH_PARAM_SZ;
- *updt_params++ =
- bass_boost->strength;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ BASS_BOOST_STRENGTH_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "BASS_BOOST_STRENGTH", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
+ AUDPROC_PARAM_ID_BASS_BOOST_STRENGTH;
+ param_hdr.param_size = BASS_BOOST_STRENGTH_PARAM_SZ;
+ param_data = (u8 *) &bass_boost->strength;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
- break;
+ continue;
}
+ if (rc)
+ goto invalid_config;
+
+ rc = q6common_pack_pp_params(updt_params, &param_hdr,
+ param_data, &packed_data_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, rc);
+ goto invalid_config;
+ }
+
+ updt_params += packed_data_size;
+ params_length += packed_data_size;
}
if (params_length && (rc == 0))
- q6asm_send_audio_effects_params(ac, params,
- params_length);
+ q6asm_set_pp_params(ac, NULL, params, params_length);
else
pr_debug("%s: did not send pp params\n", __func__);
invalid_config:
@@ -878,25 +829,32 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
{
long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
char *params = NULL;
+ u8 *updt_params;
int rc = 0;
int devices = GET_NEXT(values, param_max_offset, rc);
int num_commands = GET_NEXT(values, param_max_offset, rc);
- int *updt_params, i, j, prev_enable_flag;
- uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int i, prev_enable_flag;
+ uint32_t max_params_length = 0;
+ uint32_t params_length = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ u8 *param_data = NULL;
+ u32 packed_data_size = 0;
pr_debug("%s\n", __func__);
if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
pr_err("%s: cannot set audio effects\n", __func__);
return -EINVAL;
}
- params = kzalloc(params_length, GFP_KERNEL);
+ params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL);
if (!params) {
pr_err("%s, params memory alloc failed\n", __func__);
return -ENOMEM;
}
pr_debug("%s: device: %d\n", __func__, devices);
- updt_params = (int *)params;
- params_length = 0;
+ updt_params = (u8 *) params;
+ /* Set MID and IID once at top and only update param specific fields*/
+ param_hdr.module_id = AUDPROC_MODULE_ID_PBE;
+ param_hdr.instance_id = INSTANCE_ID_0;
for (i = 0; i < num_commands; i++) {
uint32_t command_id =
GET_NEXT(values, param_max_offset, rc);
@@ -917,23 +875,18 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
prev_enable_flag = pbe->enable_flag;
pbe->enable_flag =
GET_NEXT(values, param_max_offset, rc);
- if (prev_enable_flag != pbe->enable_flag) {
- params_length += COMMAND_PAYLOAD_SZ +
- PBE_ENABLE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "PBE_ENABLE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_PBE;
- *updt_params++ =
- AUDPROC_PARAM_ID_PBE_ENABLE;
- *updt_params++ =
- PBE_ENABLE_PARAM_SZ;
- *updt_params++ =
- pbe->enable_flag;
- }
+ if (prev_enable_flag == pbe->enable_flag)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ PBE_ENABLE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "PBE_ENABLE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_PBE_ENABLE;
+ param_hdr.param_size = PBE_ENABLE_PARAM_SZ;
+ param_data = (u8 *) &pbe->enable_flag;
break;
case PBE_CONFIG:
pr_debug("%s: PBE_PARAM length %u\n", __func__, length);
@@ -944,37 +897,38 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
rc = -EINVAL;
goto invalid_config;
}
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ + length;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "PBE_PARAM", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_PBE;
- *updt_params++ =
- AUDPROC_PARAM_ID_PBE_PARAM_CONFIG;
- *updt_params++ =
- length;
- for (j = 0; j < length; ) {
- j += sizeof(*updt_params);
- *updt_params++ =
- GET_NEXT(
- values,
- param_max_offset,
- rc);
- }
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length =
+ params_length + COMMAND_IID_PAYLOAD_SZ + length;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "PBE_PARAM", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_PBE_PARAM_CONFIG;
+ param_hdr.param_size = length;
+ param_data = (u8 *) values;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
- break;
+ continue;
+ }
+ if (rc)
+ goto invalid_config;
+
+ rc = q6common_pack_pp_params(updt_params, &param_hdr,
+ param_data, &packed_data_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, rc);
+ goto invalid_config;
}
+
+ updt_params += packed_data_size;
+ params_length += packed_data_size;
}
if (params_length && (rc == 0))
- q6asm_send_audio_effects_params(ac, params,
- params_length);
+ q6asm_set_pp_params(ac, NULL, params, params_length);
invalid_config:
kfree(params);
return rc;
@@ -986,25 +940,35 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
{
long *param_max_offset = values + MAX_PP_PARAMS_SZ - 1;
char *params = NULL;
+ u8 *updt_params = NULL;
int rc = 0;
int devices = GET_NEXT(values, param_max_offset, rc);
int num_commands = GET_NEXT(values, param_max_offset, rc);
- int *updt_params, i, prev_enable_flag;
- uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ int i, prev_enable_flag;
+ uint32_t max_params_length = 0;
+ uint32_t params_length = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ u8 *param_data = NULL;
+ u32 packed_data_size = 0;
+ u8 *eq_config_data = NULL;
+ u32 *updt_config_data = NULL;
+ int config_param_length;
pr_debug("%s\n", __func__);
if (!ac || (devices == -EINVAL) || (num_commands == -EINVAL)) {
pr_err("%s: cannot set audio effects\n", __func__);
return -EINVAL;
}
- params = kzalloc(params_length, GFP_KERNEL);
+ params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL);
if (!params) {
pr_err("%s, params memory alloc failed\n", __func__);
return -ENOMEM;
}
pr_debug("%s: device: %d\n", __func__, devices);
- updt_params = (int *)params;
- params_length = 0;
+ updt_params = (u8 *) params;
+ /* Set MID and IID once at top and only update param specific fields*/
+ param_hdr.module_id = AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
+ param_hdr.instance_id = INSTANCE_ID_0;
for (i = 0; i < num_commands; i++) {
uint32_t command_id =
GET_NEXT(values, param_max_offset, rc);
@@ -1028,23 +992,18 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: EQ_ENABLE prev:%d new:%d\n", __func__,
prev_enable_flag, eq->enable_flag);
- if (prev_enable_flag != eq->enable_flag) {
- params_length += COMMAND_PAYLOAD_SZ +
- EQ_ENABLE_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "EQ_ENABLE", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
- *updt_params++ =
- AUDPROC_PARAM_ID_EQ_ENABLE;
- *updt_params++ =
- EQ_ENABLE_PARAM_SZ;
- *updt_params++ =
- eq->enable_flag;
- }
+ if (prev_enable_flag == eq->enable_flag)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ EQ_ENABLE_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "EQ_ENABLE", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_EQ_ENABLE;
+ param_hdr.param_size = EQ_ENABLE_PARAM_SZ;
+ param_data = (u8 *) &eq->enable_flag;
break;
case EQ_CONFIG:
if (length < EQ_CONFIG_PARAM_LEN || index_offset != 0) {
@@ -1093,43 +1052,46 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
eq->per_band_cfg[idx].quality_factor =
GET_NEXT(values, param_max_offset, rc);
}
- if (command_config_state == CONFIG_SET) {
- int config_param_length = EQ_CONFIG_PARAM_SZ +
- (EQ_CONFIG_PER_BAND_PARAM_SZ*
- eq->config.num_bands);
- params_length += COMMAND_PAYLOAD_SZ +
- config_param_length;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "EQ_CONFIG", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
- *updt_params++ =
- AUDPROC_PARAM_ID_EQ_CONFIG;
- *updt_params++ =
- config_param_length;
- *updt_params++ =
- eq->config.eq_pregain;
- *updt_params++ =
- eq->config.preset_id;
- *updt_params++ =
- eq->config.num_bands;
- for (idx = 0; idx < MAX_EQ_BANDS; idx++) {
- if (eq->per_band_cfg[idx].band_idx < 0)
- continue;
- *updt_params++ =
+ if (command_config_state != CONFIG_SET)
+ break;
+ config_param_length = EQ_CONFIG_PARAM_SZ +
+ (EQ_CONFIG_PER_BAND_PARAM_SZ *
+ eq->config.num_bands);
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ config_param_length;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "EQ_CONFIG", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_EQ_CONFIG;
+ param_hdr.param_size = config_param_length;
+
+ if (!eq_config_data)
+ eq_config_data = kzalloc(config_param_length,
+ GFP_KERNEL);
+ else
+ memset(eq_config_data, 0, config_param_length);
+ if (!eq_config_data)
+ return -ENOMEM;
+ param_data = eq_config_data;
+ updt_config_data = (u32 *) eq_config_data;
+ *updt_config_data++ = eq->config.eq_pregain;
+ *updt_config_data++ = eq->config.preset_id;
+ *updt_config_data++ = eq->config.num_bands;
+ for (idx = 0; idx < MAX_EQ_BANDS; idx++) {
+ if (eq->per_band_cfg[idx].band_idx < 0)
+ continue;
+ *updt_config_data++ =
eq->per_band_cfg[idx].filter_type;
- *updt_params++ =
+ *updt_config_data++ =
eq->per_band_cfg[idx].freq_millihertz;
- *updt_params++ =
+ *updt_config_data++ =
eq->per_band_cfg[idx].gain_millibels;
- *updt_params++ =
+ *updt_config_data++ =
eq->per_band_cfg[idx].quality_factor;
- *updt_params++ =
+ *updt_config_data++ =
eq->per_band_cfg[idx].band_idx;
- }
}
break;
case EQ_BAND_INDEX:
@@ -1147,23 +1109,18 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
eq->band_index = idx;
pr_debug("%s: EQ_BAND_INDEX val:%d\n",
__func__, eq->band_index);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- EQ_BAND_INDEX_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "EQ_BAND_INDEX", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
- *updt_params++ =
- AUDPROC_PARAM_ID_EQ_BAND_INDEX;
- *updt_params++ =
- EQ_BAND_INDEX_PARAM_SZ;
- *updt_params++ =
- eq->band_index;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ EQ_BAND_INDEX_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "EQ_BAND_INDEX", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id = AUDPROC_PARAM_ID_EQ_BAND_INDEX;
+ param_hdr.param_size = EQ_BAND_INDEX_PARAM_SZ;
+ param_data = (u8 *) &eq->band_index;
break;
case EQ_SINGLE_BAND_FREQ:
if (length != 1 || index_offset != 0) {
@@ -1179,36 +1136,45 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
GET_NEXT(values, param_max_offset, rc);
pr_debug("%s: EQ_SINGLE_BAND_FREQ idx:%d, val:%d\n",
__func__, eq->band_index, eq->freq_millihertz);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- EQ_SINGLE_BAND_FREQ_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "EQ_SINGLE_BAND_FREQ", rc);
- if (rc != 0)
- goto invalid_config;
- *updt_params++ =
- AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
- *updt_params++ =
- AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ;
- *updt_params++ =
- EQ_SINGLE_BAND_FREQ_PARAM_SZ;
- *updt_params++ =
- eq->freq_millihertz;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ EQ_SINGLE_BAND_FREQ_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "EQ_SINGLE_BAND_FREQ", rc);
+ if (rc != 0)
+ break;
+ param_hdr.param_id =
+ AUDPROC_PARAM_ID_EQ_SINGLE_BAND_FREQ;
+ param_hdr.param_size = EQ_SINGLE_BAND_FREQ_PARAM_SZ;
+ param_data = (u8 *) &eq->freq_millihertz;
break;
default:
pr_err("%s: Invalid command to set config\n", __func__);
- break;
+ continue;
+ }
+ if (rc)
+ goto invalid_config;
+
+ rc = q6common_pack_pp_params(updt_params, &param_hdr,
+ param_data, &packed_data_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, rc);
+ goto invalid_config;
}
+
+ updt_params += packed_data_size;
+ params_length += packed_data_size;
}
if (params_length && (rc == 0))
- q6asm_send_audio_effects_params(ac, params,
- params_length);
+ q6asm_set_pp_params(ac, NULL, params, params_length);
else
pr_debug("%s: did not send pp params\n", __func__);
invalid_config:
kfree(params);
+ kfree(eq_config_data);
return rc;
}
@@ -1220,8 +1186,13 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
int devices;
int num_commands;
char *params = NULL;
- int *updt_params, i;
- uint32_t params_length = (MAX_INBAND_PARAM_SZ);
+ u8 *updt_params;
+ int i;
+ uint32_t vol_gain_2ch = 0;
+ uint32_t max_params_length = 0;
+ uint32_t params_length = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ u32 packed_data_size = 0;
long *param_max_offset;
int rc = 0;
@@ -1238,13 +1209,14 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
pr_err("%s: cannot set audio effects\n", __func__);
return -EINVAL;
}
- params = kzalloc(params_length, GFP_KERNEL);
+ params = kzalloc(MAX_INBAND_PARAM_SZ, GFP_KERNEL);
if (!params) {
pr_err("%s, params memory alloc failed\n", __func__);
return -ENOMEM;
}
- updt_params = (int *)params;
- params_length = 0;
+ updt_params = (u8 *) params;
+ /* Set MID and IID once at top and only update param specific fields*/
+ q6asm_set_soft_volume_module_instance_ids(instance, &param_hdr);
for (i = 0; i < num_commands; i++) {
uint32_t command_id =
GET_NEXT(values, param_max_offset, rc);
@@ -1266,43 +1238,15 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
vol->right_gain =
GET_NEXT(values, param_max_offset, rc);
vol->master_gain = 0x2000;
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
- params_length += COMMAND_PAYLOAD_SZ +
- SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "VOLUME/VOLUME2_GAIN_2CH",
- rc);
- if (rc != 0)
- goto invalid_config;
- if (instance == SOFT_VOLUME_INSTANCE_2)
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL2;
- else
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL;
- *updt_params++ =
- ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN;
- *updt_params++ =
- SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
- *updt_params++ =
- (vol->left_gain << 16) |
- vol->right_gain;
- if (instance == SOFT_VOLUME_INSTANCE_2)
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL2;
- else
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL;
- *updt_params++ =
- ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
- *updt_params++ =
- SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
- *updt_params++ =
- vol->master_gain;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ SOFT_VOLUME_GAIN_2CH_PARAM_SZ +
+ COMMAND_IID_PAYLOAD_SZ +
+ SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "VOLUME/VOLUME2_GAIN_2CH", rc);
break;
case SOFT_VOLUME_GAIN_MASTER:
case SOFT_VOLUME2_GAIN_MASTER:
@@ -1315,53 +1259,57 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
vol->right_gain = 0x2000;
vol->master_gain =
GET_NEXT(values, param_max_offset, rc);
- if (command_config_state == CONFIG_SET) {
- params_length += COMMAND_PAYLOAD_SZ +
- SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
- params_length += COMMAND_PAYLOAD_SZ +
- SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
- CHECK_PARAM_LEN(params_length,
- MAX_INBAND_PARAM_SZ,
- "VOLUME/VOLUME2_GAIN_MASTER",
- rc);
- if (rc != 0)
- goto invalid_config;
- if (instance == SOFT_VOLUME_INSTANCE_2)
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL2;
- else
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL;
- *updt_params++ =
- ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN;
- *updt_params++ =
- SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
- *updt_params++ =
- (vol->left_gain << 16) |
- vol->right_gain;
- if (instance == SOFT_VOLUME_INSTANCE_2)
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL2;
- else
- *updt_params++ =
- ASM_MODULE_ID_VOL_CTRL;
- *updt_params++ =
- ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
- *updt_params++ =
- SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
- *updt_params++ =
- vol->master_gain;
- }
+ if (command_config_state != CONFIG_SET)
+ break;
+ max_params_length = params_length +
+ COMMAND_IID_PAYLOAD_SZ +
+ SOFT_VOLUME_GAIN_2CH_PARAM_SZ +
+ COMMAND_IID_PAYLOAD_SZ +
+ SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
+ CHECK_PARAM_LEN(max_params_length, MAX_INBAND_PARAM_SZ,
+ "VOLUME/VOLUME2_GAIN_MASTER", rc);
break;
default:
pr_err("%s: Invalid command id: %d to set config\n",
__func__, command_id);
- break;
+ continue;
+ }
+ if (rc)
+ continue;
+
+ /* Set Volume Control for Left/Right */
+ param_hdr.param_id = ASM_PARAM_ID_VOL_CTRL_LR_CHANNEL_GAIN;
+ param_hdr.param_size = SOFT_VOLUME_GAIN_2CH_PARAM_SZ;
+ vol_gain_2ch = (vol->left_gain << 16) | vol->right_gain;
+ rc = q6common_pack_pp_params(updt_params, &param_hdr,
+ (u8 *) &vol_gain_2ch,
+ &packed_data_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, rc);
+ goto invalid_config;
}
+
+ updt_params += packed_data_size;
+ params_length += packed_data_size;
+
+ /* Set Master Volume Control */
+ param_hdr.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+ param_hdr.param_size = SOFT_VOLUME_GAIN_MASTER_PARAM_SZ;
+ rc = q6common_pack_pp_params(updt_params, &param_hdr,
+ (u8 *) &vol->master_gain,
+ &packed_data_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, rc);
+ goto invalid_config;
+ }
+
+ updt_params += packed_data_size;
+ params_length += packed_data_size;
}
if (params_length && (rc == 0))
- q6asm_send_audio_effects_params(ac, params,
- params_length);
+ q6asm_set_pp_params(ac, NULL, params, params_length);
invalid_config:
kfree(params);
return rc;
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 7e22567ead96..45a3f42b6f55 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -105,6 +105,7 @@ struct msm_compr_pdata {
struct msm_compr_dec_params *dec_params[MSM_FRONTEND_DAI_MAX];
struct msm_compr_ch_map *ch_map[MSM_FRONTEND_DAI_MAX];
int32_t ion_fd[MSM_FRONTEND_DAI_MAX];
+ bool is_in_use[MSM_FRONTEND_DAI_MAX];
};
struct msm_compr_audio {
@@ -1574,12 +1575,17 @@ static int msm_compr_playback_open(struct snd_compr_stream *cstream)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct msm_compr_audio *prtd;
+ struct msm_compr_audio *prtd = NULL;
struct msm_compr_pdata *pdata =
snd_soc_platform_get_drvdata(rtd->platform);
int ret = 0;
pr_debug("%s\n", __func__);
+ if (pdata->is_in_use[rtd->dai_link->be_id] == true) {
+ pr_err("%s: %s is already in use,err: %d ",
+ __func__, rtd->dai_link->cpu_dai_name, -EBUSY);
+ return -EBUSY;
+ }
prtd = kzalloc(sizeof(struct msm_compr_audio), GFP_KERNEL);
if (prtd == NULL) {
pr_err("Failed to allocate memory for msm_compr_audio\n");
@@ -1591,14 +1597,14 @@ static int msm_compr_playback_open(struct snd_compr_stream *cstream)
pdata->cstream[rtd->dai_link->be_id] = cstream;
pdata->audio_effects[rtd->dai_link->be_id] =
kzalloc(sizeof(struct msm_compr_audio_effects), GFP_KERNEL);
- if (!pdata->audio_effects[rtd->dai_link->be_id]) {
+ if (pdata->audio_effects[rtd->dai_link->be_id] == NULL) {
pr_err("%s: Could not allocate memory for effects\n", __func__);
ret = -ENOMEM;
goto effect_err;
}
pdata->dec_params[rtd->dai_link->be_id] =
kzalloc(sizeof(struct msm_compr_dec_params), GFP_KERNEL);
- if (!pdata->dec_params[rtd->dai_link->be_id]) {
+ if (pdata->dec_params[rtd->dai_link->be_id] == NULL) {
pr_err("%s: Could not allocate memory for dec params\n",
__func__);
ret = -ENOMEM;
@@ -1659,14 +1665,17 @@ static int msm_compr_playback_open(struct snd_compr_stream *cstream)
if (ret < 0)
goto map_err;
}
+ pdata->is_in_use[rtd->dai_link->be_id] = true;
return 0;
map_err:
q6asm_audio_client_free(prtd->audio_client);
ac_err:
kfree(pdata->dec_params[rtd->dai_link->be_id]);
+ pdata->dec_params[rtd->dai_link->be_id] = NULL;
param_err:
kfree(pdata->audio_effects[rtd->dai_link->be_id]);
+ pdata->audio_effects[rtd->dai_link->be_id] = NULL;
effect_err:
pdata->cstream[rtd->dai_link->be_id] = NULL;
runtime->private_data = NULL;
@@ -1836,10 +1845,15 @@ static int msm_compr_playback_free(struct snd_compr_stream *cstream)
q6asm_audio_client_free(ac);
msm_adsp_clean_mixer_ctl_pp_event_queue(soc_prtd);
+ if (pdata->audio_effects[soc_prtd->dai_link->be_id] != NULL) {
kfree(pdata->audio_effects[soc_prtd->dai_link->be_id]);
pdata->audio_effects[soc_prtd->dai_link->be_id] = NULL;
+ }
+ if (pdata->dec_params[soc_prtd->dai_link->be_id] != NULL) {
kfree(pdata->dec_params[soc_prtd->dai_link->be_id]);
pdata->dec_params[soc_prtd->dai_link->be_id] = NULL;
+ }
+ pdata->is_in_use[soc_prtd->dai_link->be_id] = false;
kfree(prtd);
runtime->private_data = NULL;
@@ -4038,6 +4052,7 @@ static int msm_compr_probe(struct snd_soc_platform *platform)
pdata->dec_params[i] = NULL;
pdata->cstream[i] = NULL;
pdata->ch_map[i] = NULL;
+ pdata->is_in_use[i] = false;
}
snd_soc_add_platform_controls(platform, msm_compr_gapless_controls,
diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
index 4de712a10f96..6dda41cc85bb 100644
--- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <sound/control.h>
#include <sound/q6adm-v2.h>
+#include <sound/q6common.h>
#include "msm-ds2-dap-config.h"
#include "msm-pcm-routing-v2.h"
@@ -196,6 +197,7 @@ static void msm_ds2_dap_check_and_update_ramp_wait(int port_id, int copp_idx,
int32_t *update_params_value = NULL;
uint32_t params_length = SOFT_VOLUME_PARAM_SIZE * sizeof(uint32_t);
uint32_t param_payload_len = PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+ struct param_hdr_v3 param_hdr = {0};
int rc = 0;
update_params_value = kzalloc(params_length + param_payload_len,
@@ -204,11 +206,13 @@ static void msm_ds2_dap_check_and_update_ramp_wait(int port_id, int copp_idx,
pr_err("%s: params memory alloc failed\n", __func__);
goto end;
}
- rc = adm_get_params(port_id, copp_idx,
- AUDPROC_MODULE_ID_VOL_CTRL,
- AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS,
- params_length + param_payload_len,
- (char *) update_params_value);
+
+ param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
+ param_hdr.param_size = params_length + param_payload_len;
+ rc = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL,
+ &param_hdr, (char *) update_params_value);
if (rc == 0) {
pr_debug("%s: params_value [0x%x, 0x%x, 0x%x]\n",
__func__, update_params_value[0],
@@ -229,12 +233,13 @@ end:
static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx,
bool is_custom_stereo_enabled)
{
- int32_t *update_params_value = NULL;
- int32_t *param_val = NULL;
- int idx, i, j, rc = 0, cdev;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- 2 * DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
+ u8 *packed_param_data = NULL;
+ u8 *param_data = NULL;
+ struct param_hdr_v3 param_hdr = {0};
+ u32 packed_param_size = 0;
+ u32 param_size = 0;
+ int cdev;
+ int rc = 0;
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
@@ -262,73 +267,88 @@ static int msm_ds2_dap_set_vspe_vdhe(int dev_map_idx,
goto end;
}
- update_params_value = kzalloc(params_length, GFP_KERNEL);
- if (!update_params_value) {
- pr_err("%s: params memory alloc failed\n", __func__);
- rc = -ENOMEM;
+ /* Allocate the max space needed */
+ packed_param_size = (TOTAL_LENGTH_DOLBY_PARAM * sizeof(uint32_t)) +
+ (2 * sizeof(union param_hdrs));
+ packed_param_data = kzalloc(packed_param_size, GFP_KERNEL);
+ if (!packed_param_data)
+ return -ENOMEM;
+
+ packed_param_size = 0;
+
+ /* Set common values */
+ cdev = dev_map[dev_map_idx].cache_dev;
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+
+ /* Pack VDHE header + data */
+ param_hdr.param_id = DOLBY_PARAM_ID_VDHE;
+ param_size = DOLBY_PARAM_VDHE_LENGTH * sizeof(uint32_t);
+ param_hdr.param_size = param_size;
+
+ if (is_custom_stereo_enabled)
+ param_data = NULL;
+ else
+ param_data = (u8 *) &ds2_dap_params[cdev]
+ .params_val[DOLBY_PARAM_VDHE_OFFSET];
+
+ rc = q6common_pack_pp_params(packed_param_data, &param_hdr, param_data,
+ &param_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n", __func__, rc);
goto end;
}
- params_length = 0;
- param_val = update_params_value;
- cdev = dev_map[dev_map_idx].cache_dev;
- /* for VDHE and VSPE DAP params at index 0 and 1 in table */
- for (i = 0; i < 2; i++) {
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = ds2_dap_params_id[i];
- *update_params_value++ = ds2_dap_params_length[i] *
- sizeof(uint32_t);
- idx = ds2_dap_params_offset[i];
- for (j = 0; j < ds2_dap_params_length[i]; j++) {
- if (is_custom_stereo_enabled)
- *update_params_value++ = 0;
- else
- *update_params_value++ =
- ds2_dap_params[cdev].params_val[idx+j];
- }
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
- ds2_dap_params_length[i]) *
- sizeof(uint32_t);
- }
-
- pr_debug("%s: valid param length: %d\n", __func__, params_length);
- if (params_length) {
- rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
- dev_map[dev_map_idx].copp_idx,
- (char *)param_val,
- params_length);
- if (rc) {
- pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
- __func__, rc);
- rc = -EINVAL;
- goto end;
- }
+ packed_param_size += param_size;
+
+ /* Pack VSPE header + data */
+ param_hdr.param_id = DOLBY_PARAM_ID_VSPE;
+ param_size = DOLBY_PARAM_VSPE_LENGTH * sizeof(uint32_t);
+ param_hdr.param_size = param_size;
+
+ if (is_custom_stereo_enabled)
+ param_data = NULL;
+ else
+ param_data = (u8 *) &ds2_dap_params[cdev]
+ .params_val[DOLBY_PARAM_VSPE_OFFSET];
+
+ rc = q6common_pack_pp_params(packed_param_data + packed_param_size,
+ &param_hdr, param_data, &param_size);
+ if (rc) {
+ pr_err("%s: Failed to pack params, error %d\n", __func__, rc);
+ goto end;
+ }
+ packed_param_size += param_size;
+
+ rc = adm_set_pp_params(dev_map[dev_map_idx].port_id,
+ dev_map[dev_map_idx].copp_idx, NULL,
+ packed_param_data, packed_param_size);
+ if (rc) {
+ pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
+ __func__, rc);
+ rc = -EINVAL;
+ goto end;
}
end:
- kfree(param_val);
+ kfree(packed_param_data);
return rc;
}
int qti_set_custom_stereo_on(int port_id, int copp_idx,
bool is_custom_stereo_on)
{
-
+ struct custom_stereo_param custom_stereo = {0};
+ struct param_hdr_v3 param_hdr = {0};
uint16_t op_FL_ip_FL_weight;
uint16_t op_FL_ip_FR_weight;
uint16_t op_FR_ip_FL_weight;
uint16_t op_FR_ip_FR_weight;
-
- int32_t *update_params_value32 = NULL, rc = 0;
- int32_t *param_val = NULL;
- int16_t *update_params_value16 = 0;
- uint32_t params_length_bytes = CUSTOM_STEREO_PAYLOAD_SIZE *
- sizeof(uint32_t);
- uint32_t avail_length = params_length_bytes;
+ int rc = 0;
if ((port_id != SLIMBUS_0_RX) &&
(port_id != RT_PROXY_PORT_001_RX)) {
pr_debug("%s:No Custom stereo for port:0x%x\n",
__func__, port_id);
- goto skip_send_cmd;
+ return 0;
}
pr_debug("%s: port 0x%x, copp_idx %d, is_custom_stereo_on %d\n",
@@ -349,76 +369,49 @@ int qti_set_custom_stereo_on(int port_id, int copp_idx,
op_FR_ip_FR_weight = Q14_GAIN_UNITY;
}
- update_params_value32 = kzalloc(params_length_bytes, GFP_KERNEL);
- if (!update_params_value32) {
- pr_err("%s, params memory alloc failed\n", __func__);
- rc = -ENOMEM;
- goto skip_send_cmd;
- }
- param_val = update_params_value32;
- if (avail_length < 2 * sizeof(uint32_t))
- goto skip_send_cmd;
- *update_params_value32++ = MTMX_MODULE_ID_DEFAULT_CHMIXER;
- *update_params_value32++ = DEFAULT_CHMIXER_PARAM_ID_COEFF;
- avail_length = avail_length - (2 * sizeof(uint32_t));
-
- update_params_value16 = (int16_t *)update_params_value32;
- if (avail_length < 10 * sizeof(uint16_t))
- goto skip_send_cmd;
- *update_params_value16++ = CUSTOM_STEREO_CMD_PARAM_SIZE;
- /* for alignment only*/
- *update_params_value16++ = 0;
+ param_hdr.module_id = MTMX_MODULE_ID_DEFAULT_CHMIXER;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = DEFAULT_CHMIXER_PARAM_ID_COEFF;
+ param_hdr.param_size = sizeof(struct custom_stereo_param);
+
/* index is 32-bit param in little endian*/
- *update_params_value16++ = CUSTOM_STEREO_INDEX_PARAM;
- *update_params_value16++ = 0;
+ custom_stereo.index = CUSTOM_STEREO_INDEX_PARAM;
+ custom_stereo.reserved = 0;
/* for stereo mixing num out ch*/
- *update_params_value16++ = CUSTOM_STEREO_NUM_OUT_CH;
+ custom_stereo.num_out_ch = CUSTOM_STEREO_NUM_OUT_CH;
/* for stereo mixing num in ch*/
- *update_params_value16++ = CUSTOM_STEREO_NUM_IN_CH;
+ custom_stereo.num_in_ch = CUSTOM_STEREO_NUM_IN_CH;
/* Out ch map FL/FR*/
- *update_params_value16++ = PCM_CHANNEL_FL;
- *update_params_value16++ = PCM_CHANNEL_FR;
+ custom_stereo.out_fl = PCM_CHANNEL_FL;
+ custom_stereo.out_fr = PCM_CHANNEL_FR;
/* In ch map FL/FR*/
- *update_params_value16++ = PCM_CHANNEL_FL;
- *update_params_value16++ = PCM_CHANNEL_FR;
- avail_length = avail_length - (10 * sizeof(uint16_t));
+ custom_stereo.in_fl = PCM_CHANNEL_FL;
+ custom_stereo.in_fr = PCM_CHANNEL_FR;
+
/* weighting coefficients as name suggests,
mixing will be done according to these coefficients*/
- if (avail_length < 4 * sizeof(uint16_t))
- goto skip_send_cmd;
- *update_params_value16++ = op_FL_ip_FL_weight;
- *update_params_value16++ = op_FL_ip_FR_weight;
- *update_params_value16++ = op_FR_ip_FL_weight;
- *update_params_value16++ = op_FR_ip_FR_weight;
- avail_length = avail_length - (4 * sizeof(uint16_t));
- if (params_length_bytes != 0) {
- rc = adm_dolby_dap_send_params(port_id, copp_idx,
- (char *)param_val,
- params_length_bytes);
- if (rc) {
- pr_err("%s: send params failed rc=%d\n", __func__, rc);
- rc = -EINVAL;
- goto skip_send_cmd;
- }
+ custom_stereo.op_FL_ip_FL_weight = op_FL_ip_FL_weight;
+ custom_stereo.op_FL_ip_FR_weight = op_FL_ip_FR_weight;
+ custom_stereo.op_FR_ip_FL_weight = op_FR_ip_FL_weight;
+ custom_stereo.op_FR_ip_FR_weight = op_FR_ip_FR_weight;
+ rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (u8 *) &custom_stereo);
+ if (rc) {
+ pr_err("%s: send params failed rc=%d\n", __func__, rc);
+ return -EINVAL;
}
- kfree(param_val);
+
return 0;
-skip_send_cmd:
- pr_err("%s: insufficient memory, send cmd failed\n",
- __func__);
- kfree(param_val);
- return rc;
}
static int dap_set_custom_stereo_onoff(int dev_map_idx,
bool is_custom_stereo_enabled)
{
+ uint32_t enable = is_custom_stereo_enabled ? 1 : 0;
+ struct param_hdr_v3 param_hdr = {0};
+ int rc = 0;
- int32_t *update_params_value = NULL, rc = 0;
- int32_t *param_val = NULL;
- uint32_t params_length_bytes = (TOTAL_LENGTH_DOLBY_PARAM +
- DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
if ((dev_map[dev_map_idx].port_id != SLIMBUS_0_RX) &&
(dev_map[dev_map_idx].port_id != RT_PROXY_PORT_001_RX)) {
pr_debug("%s:No Custom stereo for port:0x%x\n",
@@ -435,38 +428,21 @@ static int dap_set_custom_stereo_onoff(int dev_map_idx,
/* DAP custom stereo */
msm_ds2_dap_set_vspe_vdhe(dev_map_idx,
is_custom_stereo_enabled);
- update_params_value = kzalloc(params_length_bytes, GFP_KERNEL);
- if (!update_params_value) {
- pr_err("%s: params memory alloc failed\n", __func__);
- rc = -ENOMEM;
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = DOLBY_ENABLE_CUSTOM_STEREO;
+ param_hdr.param_size = sizeof(enable);
+
+ rc = adm_pack_and_set_one_pp_param(dev_map[dev_map_idx].port_id,
+ dev_map[dev_map_idx].copp_idx,
+ param_hdr, (u8 *) &enable);
+ if (rc) {
+ pr_err("%s: set custom stereo enable failed with rc=%d\n",
+ __func__, rc);
+ rc = -EINVAL;
goto end;
}
- params_length_bytes = 0;
- param_val = update_params_value;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO;
- *update_params_value++ = sizeof(uint32_t);
- if (is_custom_stereo_enabled)
- *update_params_value++ = 1;
- else
- *update_params_value++ = 0;
- params_length_bytes += (DOLBY_PARAM_PAYLOAD_SIZE + 1) *
- sizeof(uint32_t);
- pr_debug("%s: valid param length: %d\n", __func__, params_length_bytes);
- if (params_length_bytes) {
- rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
- dev_map[dev_map_idx].copp_idx,
- (char *)param_val,
- params_length_bytes);
- if (rc) {
- pr_err("%s: custom stereo param failed with rc=%d\n",
- __func__, rc);
- rc = -EINVAL;
- goto end;
- }
- }
end:
- kfree(param_val);
return rc;
}
@@ -654,8 +630,11 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
{
int rc = 0, i = 0, port_id, copp_idx;
/* Account for 32 bit interger allocation */
- int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t));
+ int32_t param_sz =
+ (ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH / sizeof(uint32_t));
int32_t *update_param_val = NULL;
+ struct module_instance_info mod_inst_info = {0};
+ int mod_inst_info_sz = 0;
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
@@ -666,7 +645,8 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
port_id = dev_map[dev_map_idx].port_id;
copp_idx = dev_map[dev_map_idx].copp_idx;
pr_debug("%s: port_id 0x%x copp_idx %d\n", __func__, port_id, copp_idx);
- update_param_val = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL);
+ update_param_val =
+ kzalloc(ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH, GFP_KERNEL);
if (!update_param_val) {
pr_err("%s, param memory alloc failed\n", __func__);
rc = -ENOMEM;
@@ -675,9 +655,10 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
if (!ds2_dap_params_states.dap_bypass) {
/* get modules from dsp */
- rc = adm_get_pp_topo_module_list(port_id, copp_idx,
- ADM_GET_TOPO_MODULE_LIST_LENGTH,
- (char *)update_param_val);
+ rc = adm_get_pp_topo_module_list_v2(
+ port_id, copp_idx,
+ ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH,
+ update_param_val);
if (rc < 0) {
pr_err("%s:topo list port %d, err %d,copp_idx %d\n",
__func__, port_id, copp_idx, rc);
@@ -691,11 +672,15 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
rc = -EINVAL;
goto end;
}
+
+ mod_inst_info_sz = sizeof(struct module_instance_info) /
+ sizeof(uint32_t);
/* Turn off modules */
- for (i = 1; i < update_param_val[0]; i++) {
+ for (i = 1; i < update_param_val[0] * mod_inst_info_sz;
+ i += mod_inst_info_sz) {
if (!msm_ds2_dap_can_enable_module(
- update_param_val[i]) ||
- (update_param_val[i] == DS2_MODULE_ID)) {
+ update_param_val[i]) ||
+ (update_param_val[i] == DS2_MODULE_ID)) {
pr_debug("%s: Do not enable/disable %d\n",
__func__, update_param_val[i]);
continue;
@@ -703,15 +688,21 @@ static int msm_ds2_dap_init_modules_in_topology(int dev_map_idx)
pr_debug("%s: param disable %d\n",
__func__, update_param_val[i]);
- adm_param_enable(port_id, copp_idx, update_param_val[i],
- MODULE_DISABLE);
+ memcpy(&mod_inst_info, &update_param_val[i],
+ sizeof(mod_inst_info));
+ adm_param_enable_v2(port_id, copp_idx,
+ mod_inst_info,
+ MODULE_DISABLE);
}
} else {
msm_ds2_dap_send_cal_data(dev_map_idx);
}
- adm_param_enable(port_id, copp_idx, DS2_MODULE_ID,
- !ds2_dap_params_states.dap_bypass);
+
+ mod_inst_info.module_id = DS2_MODULE_ID;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
+ adm_param_enable_v2(port_id, copp_idx, mod_inst_info,
+ !ds2_dap_params_states.dap_bypass);
end:
kfree(update_param_val);
return rc;
@@ -884,17 +875,21 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
{
int rc = 0, i = 0, j = 0;
/*Account for 32 bit interger allocation */
- int32_t param_sz = (ADM_GET_TOPO_MODULE_LIST_LENGTH / sizeof(uint32_t));
+ int32_t param_sz =
+ (ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH / sizeof(uint32_t));
int32_t *mod_list = NULL;
int port_id = 0, copp_idx = -1;
bool cs_onoff = ds2_dap_params_states.custom_stereo_onoff;
int ramp_wait = DOLBY_SOFT_VOLUME_PERIOD;
+ struct module_instance_info mod_inst_info = {0};
+ int mod_inst_info_sz = 0;
pr_debug("%s: bypass type %d bypass %d custom stereo %d\n", __func__,
ds2_dap_params_states.dap_bypass_type,
ds2_dap_params_states.dap_bypass,
ds2_dap_params_states.custom_stereo_onoff);
- mod_list = kzalloc(ADM_GET_TOPO_MODULE_LIST_LENGTH, GFP_KERNEL);
+ mod_list =
+ kzalloc(ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH, GFP_KERNEL);
if (!mod_list) {
pr_err("%s: param memory alloc failed\n", __func__);
rc = -ENOMEM;
@@ -921,9 +916,10 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
}
/* getmodules from dsp */
- rc = adm_get_pp_topo_module_list(port_id, copp_idx,
- ADM_GET_TOPO_MODULE_LIST_LENGTH,
- (char *)mod_list);
+ rc = adm_get_pp_topo_module_list_v2(
+ port_id, copp_idx,
+ ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH,
+ mod_list);
if (rc < 0) {
pr_err("%s:adm get topo list port %d",
__func__, port_id);
@@ -975,8 +971,11 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
/* if dap bypass is set */
if (ds2_dap_params_states.dap_bypass) {
/* Turn off dap module */
- adm_param_enable(port_id, copp_idx,
- DS2_MODULE_ID, MODULE_DISABLE);
+ mod_inst_info.module_id = DS2_MODULE_ID;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
+ adm_param_enable_v2(port_id, copp_idx,
+ mod_inst_info,
+ MODULE_DISABLE);
/*
* If custom stereo is on at the time of bypass,
* switch off custom stereo on dap and turn on
@@ -999,8 +998,13 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
copp_idx, rc);
}
}
+
+ mod_inst_info_sz =
+ sizeof(struct module_instance_info) /
+ sizeof(uint32_t);
/* Turn on qti modules */
- for (j = 1; j < mod_list[0]; j++) {
+ for (j = 1; j < mod_list[0] * mod_inst_info_sz;
+ j += mod_inst_info_sz) {
if (!msm_ds2_dap_can_enable_module(
mod_list[j]) ||
mod_list[j] ==
@@ -1008,9 +1012,11 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
continue;
pr_debug("%s: param enable %d\n",
__func__, mod_list[j]);
- adm_param_enable(port_id, copp_idx,
- mod_list[j],
- MODULE_ENABLE);
+ memcpy(&mod_inst_info, &mod_list[j],
+ sizeof(mod_inst_info));
+ adm_param_enable_v2(port_id, copp_idx,
+ mod_inst_info,
+ MODULE_ENABLE);
}
/* Add adm api to resend calibration on port */
@@ -1025,7 +1031,8 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
}
} else {
/* Turn off qti modules */
- for (j = 1; j < mod_list[0]; j++) {
+ for (j = 1; j < mod_list[0] * mod_inst_info_sz;
+ j += mod_inst_info_sz) {
if (!msm_ds2_dap_can_enable_module(
mod_list[j]) ||
mod_list[j] ==
@@ -1033,15 +1040,20 @@ static int msm_ds2_dap_handle_bypass(struct dolby_param_data *dolby_data)
continue;
pr_debug("%s: param disable %d\n",
__func__, mod_list[j]);
- adm_param_enable(port_id, copp_idx,
- mod_list[j],
- MODULE_DISABLE);
+ memcpy(&mod_inst_info, &mod_list[j],
+ sizeof(mod_inst_info));
+ adm_param_enable_v2(port_id, copp_idx,
+ mod_inst_info,
+ MODULE_DISABLE);
}
/* Enable DAP modules */
pr_debug("%s:DS2 param enable\n", __func__);
- adm_param_enable(port_id, copp_idx,
- DS2_MODULE_ID, MODULE_ENABLE);
+ mod_inst_info.module_id = DS2_MODULE_ID;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
+ adm_param_enable_v2(port_id, copp_idx,
+ mod_inst_info,
+ MODULE_ENABLE);
/*
* If custom stereo is on at the time of dap on,
* switch off custom stereo on qti channel mixer
@@ -1100,13 +1112,12 @@ end:
static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
{
- int rc = 0;
- int32_t *update_params_value = NULL, *params_value = NULL;
- uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
- DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
+ uint32_t offset = 0;
+ struct param_hdr_v3 param_hdr = {0};
int cache_device = 0;
struct ds2_dap_params_s *ds2_ap_params_obj = NULL;
int32_t *modified_param = NULL;
+ int rc = 0;
if (dev_map_idx < 0 || dev_map_idx >= DS2_DEVICES_ALL) {
pr_err("%s: invalid dev map index %d\n", __func__, dev_map_idx);
@@ -1121,13 +1132,6 @@ static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
pr_debug("%s: endp - %pK %pK\n", __func__,
&ds2_dap_params[cache_device], ds2_ap_params_obj);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s: params memory alloc failed\n", __func__);
- rc = -ENOMEM;
- goto end;
- }
-
if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: invalid port\n", __func__);
rc = -EINVAL;
@@ -1141,21 +1145,20 @@ static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
goto end;
}
- update_params_value = params_value;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
- *update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
- *update_params_value++ = ds2_ap_params_obj->params_val[
- ds2_dap_params_offset[endp_idx]];
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = DOLBY_PARAM_ID_INIT_ENDP;
+ param_hdr.param_size = sizeof(offset);
+ offset = ds2_ap_params_obj->params_val[ds2_dap_params_offset[endp_idx]];
pr_debug("%s: off %d, length %d\n", __func__,
ds2_dap_params_offset[endp_idx],
ds2_dap_params_length[endp_idx]);
pr_debug("%s: param 0x%x, param val %d\n", __func__,
ds2_dap_params_id[endp_idx], ds2_ap_params_obj->
params_val[ds2_dap_params_offset[endp_idx]]);
- rc = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
- dev_map[dev_map_idx].copp_idx,
- (char *)params_value, params_length);
+ rc = adm_pack_and_set_one_pp_param(dev_map[dev_map_idx].port_id,
+ dev_map[dev_map_idx].copp_idx,
+ param_hdr, (u8 *) &offset);
if (rc) {
pr_err("%s: send dolby params failed rc %d\n", __func__, rc);
rc = -EINVAL;
@@ -1172,19 +1175,17 @@ static int msm_ds2_dap_send_end_point(int dev_map_idx, int endp_idx)
ds2_ap_params_obj->dap_params_modified[endp_idx] = 0x00010001;
end:
- kfree(params_value);
return rc;
}
static int msm_ds2_dap_send_cached_params(int dev_map_idx,
int commit)
{
- int32_t *update_params_value = NULL, *params_value = NULL;
- uint32_t idx, i, j, ret = 0;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- (MAX_DS2_PARAMS - 1) *
- DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
+ uint8_t *packed_params = NULL;
+ uint32_t packed_params_size = 0;
+ uint32_t param_size = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ uint32_t idx, i, ret = 0;
int cache_device = 0;
struct ds2_dap_params_s *ds2_ap_params_obj = NULL;
int32_t *modified_param = NULL;
@@ -1207,12 +1208,16 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx,
pr_debug("%s: cached param - %pK %pK, cache_device %d\n", __func__,
&ds2_dap_params[cache_device], ds2_ap_params_obj,
cache_device);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s: params memory alloc failed\n", __func__);
- ret = -ENOMEM;
- goto end;
- }
+
+ /*
+ * Allocate the max space needed. This is enough space to hold the
+ * header for each param plus the total size of all the params.
+ */
+ packed_params_size = (sizeof(param_hdr) * (MAX_DS2_PARAMS - 1)) +
+ (TOTAL_LENGTH_DOLBY_PARAM * sizeof(uint32_t));
+ packed_params = kzalloc(packed_params_size, GFP_KERNEL);
+ if (!packed_params)
+ return -ENOMEM;
if (dev_map[dev_map_idx].port_id == DOLBY_INVALID_PORT_ID) {
pr_err("%s: invalid port id\n", __func__);
@@ -1227,8 +1232,7 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx,
goto end;
}
- update_params_value = params_value;
- params_length = 0;
+ packed_params_size = 0;
for (i = 0; i < (MAX_DS2_PARAMS-1); i++) {
/*get the pointer to the param modified array in the cache*/
modified_param = ds2_ap_params_obj->dap_params_modified;
@@ -1241,28 +1245,33 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx,
if (!msm_ds2_dap_check_is_param_modified(modified_param, i,
commit))
continue;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = ds2_dap_params_id[i];
- *update_params_value++ = ds2_dap_params_length[i] *
- sizeof(uint32_t);
+
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = ds2_dap_params_id[i];
+ param_hdr.param_size =
+ ds2_dap_params_length[i] * sizeof(uint32_t);
+
idx = ds2_dap_params_offset[i];
- for (j = 0; j < ds2_dap_params_length[i]; j++) {
- *update_params_value++ =
- ds2_ap_params_obj->params_val[idx+j];
- pr_debug("%s: id 0x%x,val %d\n", __func__,
- ds2_dap_params_id[i],
- ds2_ap_params_obj->params_val[idx+j]);
+ ret = q6common_pack_pp_params(
+ packed_params + packed_params_size, &param_hdr,
+ (u8 *) &ds2_ap_params_obj->params_val[idx],
+ &param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, ret);
+ goto end;
}
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
- ds2_dap_params_length[i]) * sizeof(uint32_t);
+
+ packed_params_size += param_size;
}
- pr_debug("%s: valid param length: %d\n", __func__, params_length);
- if (params_length) {
- ret = adm_dolby_dap_send_params(dev_map[dev_map_idx].port_id,
- dev_map[dev_map_idx].copp_idx,
- (char *)params_value,
- params_length);
+ pr_debug("%s: total packed param length: %d\n", __func__,
+ packed_params_size);
+ if (packed_params_size) {
+ ret = adm_set_pp_params(dev_map[dev_map_idx].port_id,
+ dev_map[dev_map_idx].copp_idx, NULL,
+ packed_params, packed_params_size);
if (ret) {
pr_err("%s: send dolby params failed ret %d\n",
__func__, ret);
@@ -1285,7 +1294,7 @@ static int msm_ds2_dap_send_cached_params(int dev_map_idx,
}
}
end:
- kfree(params_value);
+ kfree(packed_params);
return ret;
}
@@ -1522,11 +1531,12 @@ static int msm_ds2_dap_get_param(u32 cmd, void *arg)
{
int rc = 0, i, port_id = 0, copp_idx = -1;
struct dolby_param_data *dolby_data = (struct dolby_param_data *)arg;
- int32_t *update_params_value = NULL, *params_value = NULL;
+ int32_t *params_value = NULL;
uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
sizeof(uint32_t);
uint32_t param_payload_len =
DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+ struct param_hdr_v3 param_hdr;
/* Return error on get param in soft or hard bypass */
if (ds2_dap_params_states.dap_bypass == true) {
@@ -1572,18 +1582,14 @@ static int msm_ds2_dap_get_param(u32 cmd, void *arg)
params_value = kzalloc(params_length + param_payload_len,
GFP_KERNEL);
- if (!params_value) {
- pr_err("%s: params memory alloc failed\n", __func__);
- rc = -ENOMEM;
- goto end;
- }
+ if (!params_value)
+ return -ENOMEM;
if (dolby_data->param_id == DOLBY_PARAM_ID_VER) {
- rc = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID,
- DOLBY_PARAM_ID_VER,
- params_length + param_payload_len,
- (char *)params_value);
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = DOLBY_PARAM_ID_VER;
+ param_hdr.param_size = params_length + param_payload_len;
} else {
for (i = 0; i < MAX_DS2_PARAMS; i++)
if (ds2_dap_params_id[i] ==
@@ -1596,25 +1602,25 @@ static int msm_ds2_dap_get_param(u32 cmd, void *arg)
goto end;
} else {
params_length =
- ds2_dap_params_length[i] * sizeof(uint32_t);
+ ds2_dap_params_length[i] * sizeof(uint32_t);
- rc = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID,
- ds2_dap_params_id[i],
- params_length +
- param_payload_len,
- (char *)params_value);
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = ds2_dap_params_id[i];
+ param_hdr.param_size =
+ params_length + param_payload_len;
}
}
+ rc = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL,
+ &param_hdr, (u8 *) params_value);
if (rc) {
pr_err("%s: get parameters failed rc %d\n", __func__, rc);
rc = -EINVAL;
goto end;
}
- update_params_value = params_value;
- if (copy_to_user((void *)dolby_data->data,
- &update_params_value[DOLBY_PARAM_PAYLOAD_SIZE],
- (dolby_data->length * sizeof(uint32_t)))) {
+ if (copy_to_user((void __user *) dolby_data->data,
+ &params_value[DOLBY_PARAM_PAYLOAD_SIZE],
+ (dolby_data->length * sizeof(uint32_t)))) {
pr_err("%s: error getting param\n", __func__);
rc = -EFAULT;
goto end;
@@ -1633,6 +1639,7 @@ static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg)
uint32_t offset, length, params_length;
uint32_t param_payload_len =
DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
+ struct param_hdr_v3 param_hdr = {0};
for (i = 0; i < DS2_DEVICES_ALL; i++) {
if ((dev_map[i].active)) {
@@ -1683,11 +1690,13 @@ static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg)
offset = 0;
params_length = length * sizeof(uint32_t);
- ret = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID,
- DOLBY_PARAM_ID_VCBG,
- params_length + param_payload_len,
- (((char *)(visualizer_data)) + offset));
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = DOLBY_PARAM_ID_VCBG;
+ param_hdr.param_size = length * sizeof(uint32_t) + param_payload_len;
+ ret = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL,
+ &param_hdr,
+ (((char *) (visualizer_data)) + offset));
if (ret) {
pr_err("%s: get parameters failed ret %d\n", __func__, ret);
ret = -EINVAL;
@@ -1695,11 +1704,13 @@ static int msm_ds2_dap_param_visualizer_control_get(u32 cmd, void *arg)
goto end;
}
offset = length * sizeof(uint32_t);
- ret = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID,
- DOLBY_PARAM_ID_VCBE,
- params_length + param_payload_len,
- (((char *)(visualizer_data)) + offset));
+ param_hdr.module_id = DOLBY_BUNDLE_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = DOLBY_PARAM_ID_VCBE;
+ param_hdr.param_size = length * sizeof(uint32_t) + param_payload_len;
+ ret = adm_get_pp_params(port_id, copp_idx, ADM_CLIENT_ID_DEFAULT, NULL,
+ &param_hdr,
+ (((char *) (visualizer_data)) + offset));
if (ret) {
pr_err("%s: get parameters failed ret %d\n", __func__, ret);
ret = -EINVAL;
diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h
index 0eb6017fd383..c2687017c962 100644
--- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2017, The Linux Foundation. All rights
+ * reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -32,7 +33,6 @@ struct dolby_param_license32 {
compat_uptr_t license_key;
};
-
#define SNDRV_DEVDEP_DAP_IOCTL_SET_PARAM32\
_IOWR('U', 0x10, struct dolby_param_data32)
#define SNDRV_DEVDEP_DAP_IOCTL_GET_PARAM32\
@@ -62,6 +62,34 @@ enum {
DAP_CMD_SET_BYPASS_TYPE = 5,
};
+struct custom_stereo_param {
+ /* Index is 32-bit param in little endian */
+ u16 index;
+ u16 reserved;
+
+ /* For stereo mixing, the number of out channels */
+ u16 num_out_ch;
+ /* For stereo mixing, the number of in channels */
+ u16 num_in_ch;
+
+ /* Out channel map FL/FR*/
+ u16 out_fl;
+ u16 out_fr;
+
+ /* In channel map FL/FR*/
+ u16 in_fl;
+ u16 in_fr;
+
+ /*
+ * Weighting coefficients. Mixing will be done according to
+ * these coefficients.
+ */
+ u16 op_FL_ip_FL_weight;
+ u16 op_FL_ip_FR_weight;
+ u16 op_FR_ip_FL_weight;
+ u16 op_FR_ip_FR_weight;
+};
+
#define DOLBY_PARAM_INT_ENDP_LENGTH 1
#define DOLBY_PARAM_INT_ENDP_OFFSET (DOLBY_PARAM_PSTG_OFFSET + \
DOLBY_PARAM_PSTG_LENGTH)
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 0d01803e634d..7e022619c097 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -183,6 +183,11 @@ static void event_handler(uint32_t opcode,
case ASM_DATA_EVENT_READ_DONE_V2: {
pr_debug("ASM_DATA_EVENT_READ_DONE_V2\n");
buf_index = q6asm_get_buf_index_from_token(token);
+ if (buf_index >= CAPTURE_MAX_NUM_PERIODS) {
+ pr_err("%s: buffer index %u is out of range.\n",
+ __func__, buf_index);
+ return;
+ }
pr_debug("%s: token=0x%08x buf_index=0x%08x\n",
__func__, token, buf_index);
prtd->in_frame_info[buf_index].size = payload[4];
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 70531872076b..2dba05df40e0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -28,6 +28,7 @@
#include <sound/q6adm-v2.h>
#include <sound/q6asm-v2.h>
#include <sound/q6afe-v2.h>
+#include <sound/q6common.h>
#include <sound/tlv.h>
#include <sound/asound.h>
#include <sound/pcm_params.h>
@@ -11383,22 +11384,23 @@ int msm_routing_get_rms_value_control(struct snd_kcontrol *kcontrol,
int be_idx = 0;
char *param_value;
int *update_param_value;
- uint32_t param_length = sizeof(uint32_t);
- uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t);
- param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL);
- if (!param_value) {
- pr_err("%s, param memory alloc failed\n", __func__);
+ uint32_t param_size = (RMS_PAYLOAD_LEN + 1) * sizeof(uint32_t);
+ struct param_hdr_v3 param_hdr = {0};
+
+ param_value = kzalloc(param_size, GFP_KERNEL);
+ if (!param_value)
return -ENOMEM;
- }
+
for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++)
if (msm_bedais[be_idx].port_id == SLIMBUS_0_TX)
break;
if ((be_idx < MSM_BACKEND_DAI_MAX) && msm_bedais[be_idx].active) {
- rc = adm_get_params(SLIMBUS_0_TX, 0,
- RMS_MODULEID_APPI_PASSTHRU,
- RMS_PARAM_FIRST_SAMPLE,
- param_length + param_payload_len,
- param_value);
+ param_hdr.module_id = RMS_MODULEID_APPI_PASSTHRU;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = RMS_PARAM_FIRST_SAMPLE;
+ param_hdr.param_size = param_size;
+ rc = adm_get_pp_params(SLIMBUS_0_TX, 0, ADM_CLIENT_ID_DEFAULT,
+ NULL, &param_hdr, (u8 *) param_value);
if (rc) {
pr_err("%s: get parameters failed:%d\n", __func__, rc);
kfree(param_value);
@@ -16426,6 +16428,47 @@ static const struct snd_kcontrol_new stereo_channel_reverse_control[] = {
msm_routing_stereo_channel_reverse_control_put),
};
+static int msm_routing_instance_id_support_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = 1;
+ return 0;
+}
+
+static int msm_routing_instance_id_support_put(
+ struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+ bool supported = ucontrol->value.integer.value[0] ? true : false;
+
+ q6common_update_instance_id_support(supported);
+
+ return 0;
+}
+
+static int msm_routing_instance_id_support_get(
+ struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+ bool supported = false;
+
+ supported = q6common_is_instance_id_supported();
+ ucontrol->value.integer.value[0] = supported ? 1 : 0;
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new
+ msm_routing_feature_support_mixer_controls[] = {
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_WRITE,
+ .info = msm_routing_instance_id_support_info,
+ .name = "Instance ID Support",
+ .put = msm_routing_instance_id_support_put,
+ .get = msm_routing_instance_id_support_get,
+ },
+};
+
static struct snd_pcm_ops msm_routing_pcm_ops = {
.hw_params = msm_pcm_routing_hw_params,
.close = msm_pcm_routing_close,
@@ -16585,6 +16628,10 @@ static int msm_routing_probe(struct snd_soc_platform *platform)
ARRAY_SIZE(aptx_dec_license_controls));
snd_soc_add_platform_controls(platform, stereo_channel_reverse_control,
ARRAY_SIZE(stereo_channel_reverse_control));
+ snd_soc_add_platform_controls(
+ platform, msm_routing_feature_support_mixer_controls,
+ ARRAY_SIZE(msm_routing_feature_support_mixer_controls));
+
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index f3ec45b8f9b1..76d8f8d9e33c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -556,12 +556,14 @@ static int msm_voice_slowtalk_put(struct snd_kcontrol *kcontrol,
{
int st_enable = ucontrol->value.integer.value[0];
uint32_t session_id = ucontrol->value.integer.value[1];
+ struct module_instance_info mod_inst_info = {0};
pr_debug("%s: st enable=%d session_id=%#x\n", __func__, st_enable,
session_id);
- voc_set_pp_enable(session_id,
- MODULE_ID_VOICE_MODULE_ST, st_enable);
+ mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
+ voc_set_pp_enable(session_id, mod_inst_info, st_enable);
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index 65f5167d9dee..bcfb090d556b 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -18,6 +18,7 @@
#include <sound/q6adm-v2.h>
#include <sound/q6asm-v2.h>
#include <sound/q6afe-v2.h>
+#include <sound/q6common.h>
#include <sound/asound.h>
#include <sound/q6audio-v2.h>
#include <sound/tlv.h>
@@ -327,14 +328,13 @@ static int msm_qti_pp_get_rms_value_control(struct snd_kcontrol *kcontrol,
int be_idx = 0, copp_idx;
char *param_value;
int *update_param_value;
- uint32_t param_length = sizeof(uint32_t);
- uint32_t param_payload_len = RMS_PAYLOAD_LEN * sizeof(uint32_t);
+ uint32_t param_size = (RMS_PAYLOAD_LEN + 1) * sizeof(uint32_t);
struct msm_pcm_routing_bdai_data msm_bedai;
- param_value = kzalloc(param_length + param_payload_len, GFP_KERNEL);
- if (!param_value) {
- pr_err("%s, param memory alloc failed\n", __func__);
+ struct param_hdr_v3 param_hdr = {0};
+
+ param_value = kzalloc(param_size, GFP_KERNEL);
+ if (!param_value)
return -ENOMEM;
- }
msm_pcm_routing_acquire_lock();
for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) {
msm_pcm_routing_get_bedai_info(be_idx, &msm_bedai);
@@ -354,11 +354,12 @@ static int msm_qti_pp_get_rms_value_control(struct snd_kcontrol *kcontrol,
rc = -EINVAL;
goto get_rms_value_err;
}
- rc = adm_get_params(SLIMBUS_0_TX, copp_idx,
- RMS_MODULEID_APPI_PASSTHRU,
- RMS_PARAM_FIRST_SAMPLE,
- param_length + param_payload_len,
- param_value);
+ param_hdr.module_id = RMS_MODULEID_APPI_PASSTHRU;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = RMS_PARAM_FIRST_SAMPLE;
+ param_hdr.param_size = param_size;
+ rc = adm_get_pp_params(SLIMBUS_0_TX, copp_idx, ADM_CLIENT_ID_DEFAULT,
+ NULL, &param_hdr, param_value);
if (rc) {
pr_err("%s: get parameters failed rc=%d\n", __func__, rc);
rc = -EINVAL;
@@ -655,64 +656,82 @@ static void msm_qti_pp_asphere_init_state(void)
static int msm_qti_pp_asphere_send_params(int port_id, int copp_idx, bool force)
{
- char *params_value = NULL;
- uint32_t *update_params_value = NULL;
- uint32_t param_size = sizeof(uint32_t) +
- sizeof(struct adm_param_data_v5);
- int params_length = 0, param_count = 0, ret = 0;
+ u8 *packed_params = NULL;
+ u32 packed_params_size = 0;
+ u32 param_size = 0;
+ struct param_hdr_v3 param_hdr = {0};
bool set_enable = force ||
(asphere_state.enabled != asphere_state.enabled_prev);
bool set_strength = asphere_state.enabled == 1 && (set_enable ||
(asphere_state.strength != asphere_state.strength_prev));
+ int param_count = 0;
+ int ret = 0;
if (set_enable)
param_count++;
if (set_strength)
param_count++;
- params_length = param_count * param_size;
+
+ if (param_count == 0) {
+ pr_debug("%s: Nothing to send, exiting\n", __func__);
+ return 0;
+ }
pr_debug("%s: port_id %d, copp_id %d, forced %d, param_count %d\n",
- __func__, port_id, copp_idx, force, param_count);
+ __func__, port_id, copp_idx, force, param_count);
pr_debug("%s: enable prev:%u cur:%u, strength prev:%u cur:%u\n",
__func__, asphere_state.enabled_prev, asphere_state.enabled,
asphere_state.strength_prev, asphere_state.strength);
- if (params_length > 0)
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed\n", __func__);
+ packed_params_size =
+ param_count * (sizeof(struct param_hdr_v3) + sizeof(uint32_t));
+ packed_params = kzalloc(packed_params_size, GFP_KERNEL);
+ if (!packed_params)
return -ENOMEM;
- }
- update_params_value = (uint32_t *)params_value;
- params_length = 0;
+
+ packed_params_size = 0;
+ param_hdr.module_id = AUDPROC_MODULE_ID_AUDIOSPHERE;
+ param_hdr.instance_id = INSTANCE_ID_0;
if (set_strength) {
/* add strength command */
- *update_params_value++ = AUDPROC_MODULE_ID_AUDIOSPHERE;
- *update_params_value++ = AUDPROC_PARAM_ID_AUDIOSPHERE_STRENGTH;
- *update_params_value++ = sizeof(uint32_t);
- *update_params_value++ = asphere_state.strength;
- params_length += param_size;
+ param_hdr.param_id = AUDPROC_PARAM_ID_AUDIOSPHERE_STRENGTH;
+ param_hdr.param_size = sizeof(asphere_state.strength);
+ ret = q6common_pack_pp_params(packed_params +
+ packed_params_size,
+ &param_hdr,
+ (u8 *) &asphere_state.strength,
+ &param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, ret);
+ goto done;
+ }
+ packed_params_size += param_size;
}
if (set_enable) {
/* add enable command */
- *update_params_value++ = AUDPROC_MODULE_ID_AUDIOSPHERE;
- *update_params_value++ = AUDPROC_PARAM_ID_AUDIOSPHERE_ENABLE;
- *update_params_value++ = sizeof(uint32_t);
- *update_params_value++ = asphere_state.enabled;
- params_length += param_size;
- }
- pr_debug("%s, param length: %d\n", __func__, params_length);
- if (params_length) {
- ret = adm_send_params_v5(port_id, copp_idx,
- params_value, params_length);
+ param_hdr.param_id = AUDPROC_PARAM_ID_AUDIOSPHERE_ENABLE;
+ param_hdr.param_size = sizeof(asphere_state.enabled);
+ q6common_pack_pp_params(packed_params + packed_params_size,
+ &param_hdr,
+ (u8 *) &asphere_state.enabled,
+ &param_size);
if (ret) {
- pr_err("%s: setting param failed with err=%d\n",
- __func__, ret);
- kfree(params_value);
- return -EINVAL;
+ pr_err("%s: Failed to pack params, error %d\n",
+ __func__, ret);
+ goto done;
}
+ packed_params_size += param_size;
}
- kfree(params_value);
+
+ pr_debug("%s: packed data size: %d\n", __func__, packed_params_size);
+ ret = adm_set_pp_params(port_id, copp_idx, NULL, packed_params,
+ packed_params_size);
+ if (ret)
+ pr_err("%s: set param failed with err=%d\n", __func__, ret);
+
+done:
+ kfree(packed_params);
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 018681309f2e..dc66c5ad93d5 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -22,6 +22,7 @@
#include <sound/q6adm-v2.h>
#include <sound/q6audio-v2.h>
#include <sound/q6afe-v2.h>
+#include <sound/q6common.h>
#include <sound/audio_cal_utils.h>
#include <sound/asound.h>
#include "msm-dts-srs-tm-config.h"
@@ -32,8 +33,8 @@
#define RESET_COPP_ID 99
#define INVALID_COPP_ID 0xFF
/* Used for inband payload copy, max size is 4k */
-/* 2 is to account for module & param ID in payload */
-#define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 2 * sizeof(uint32_t))
+/* 3 is to account for module, instance & param ID in payload */
+#define ADM_GET_PARAMETER_LENGTH (4096 - APR_HDR_SIZE - 3 * sizeof(uint32_t))
#define ULL_SUPPORTED_BITS_PER_SAMPLE 16
#define ULL_SUPPORTED_SAMPLE_RATE 48000
@@ -119,8 +120,8 @@ static struct adm_multi_ch_map multi_ch_maps[2] = {
};
static int adm_get_parameters[MAX_COPPS_PER_PORT * ADM_GET_PARAMETER_LENGTH];
-static int adm_module_topo_list[
- MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH];
+static int adm_module_topo_list[MAX_COPPS_PER_PORT *
+ ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH];
int adm_validate_and_get_port_index(int port_id)
{
@@ -258,10 +259,12 @@ static int adm_get_next_available_copp(int port_idx)
int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id,
void *srs_params)
{
- struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
- struct adm_cmd_set_pp_params_v5 *adm_params_ = NULL;
- __s32 sz = 0, param_id, module_id = SRS_TRUMEDIA_MODULE_ID, outband = 0;
- int ret = 0, port_idx;
+ struct param_hdr_v3 param_hdr = {0};
+ struct mem_mapping_hdr mem_hdr = {0};
+ u32 total_param_size = 0;
+ bool outband = false;
+ int port_idx;
+ int ret = 0;
pr_debug("SRS - %s", __func__);
@@ -271,246 +274,92 @@ int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id,
pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
return -EINVAL;
}
+
+ param_hdr.module_id = SRS_TRUMEDIA_MODULE_ID;
+ param_hdr.instance_id = INSTANCE_ID_0;
+
switch (srs_tech_id) {
case SRS_ID_GLOBAL: {
- struct srs_trumedia_params_GLOBAL *glb_params = NULL;
- sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
+ param_hdr.param_id = SRS_TRUMEDIA_PARAMS;
+ param_hdr.param_size =
sizeof(struct srs_trumedia_params_GLOBAL);
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed\n",
- __func__);
- return -ENOMEM;
- }
- adm_params->payload_size =
- sizeof(struct srs_trumedia_params_GLOBAL) +
- sizeof(struct adm_param_data_v5);
- param_id = SRS_TRUMEDIA_PARAMS;
- adm_params->params.param_size =
- sizeof(struct srs_trumedia_params_GLOBAL);
- glb_params = (struct srs_trumedia_params_GLOBAL *)
- ((u8 *)adm_params +
- sizeof(struct adm_cmd_set_pp_params_inband_v5));
- memcpy(glb_params, srs_params,
- sizeof(struct srs_trumedia_params_GLOBAL));
break;
}
case SRS_ID_WOWHD: {
- struct srs_trumedia_params_WOWHD *whd_params = NULL;
- sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
- sizeof(struct srs_trumedia_params_WOWHD);
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed\n",
- __func__);
- return -ENOMEM;
- }
- adm_params->payload_size =
- sizeof(struct srs_trumedia_params_WOWHD) +
- sizeof(struct adm_param_data_v5);
- param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
- adm_params->params.param_size =
- sizeof(struct srs_trumedia_params_WOWHD);
- whd_params = (struct srs_trumedia_params_WOWHD *)
- ((u8 *)adm_params +
- sizeof(struct adm_cmd_set_pp_params_inband_v5));
- memcpy(whd_params, srs_params,
- sizeof(struct srs_trumedia_params_WOWHD));
+ param_hdr.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
+ param_hdr.param_size = sizeof(struct srs_trumedia_params_WOWHD);
break;
}
case SRS_ID_CSHP: {
- struct srs_trumedia_params_CSHP *chp_params = NULL;
- sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
- sizeof(struct srs_trumedia_params_CSHP);
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed\n",
- __func__);
- return -ENOMEM;
- }
- adm_params->payload_size =
- sizeof(struct srs_trumedia_params_CSHP) +
- sizeof(struct adm_param_data_v5);
- param_id = SRS_TRUMEDIA_PARAMS_CSHP;
- adm_params->params.param_size =
- sizeof(struct srs_trumedia_params_CSHP);
- chp_params = (struct srs_trumedia_params_CSHP *)
- ((u8 *)adm_params +
- sizeof(struct adm_cmd_set_pp_params_inband_v5));
- memcpy(chp_params, srs_params,
- sizeof(struct srs_trumedia_params_CSHP));
+ param_hdr.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
+ param_hdr.param_size = sizeof(struct srs_trumedia_params_CSHP);
break;
}
case SRS_ID_HPF: {
- struct srs_trumedia_params_HPF *hpf_params = NULL;
- sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
- sizeof(struct srs_trumedia_params_HPF);
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed\n",
- __func__);
- return -ENOMEM;
- }
- adm_params->payload_size =
- sizeof(struct srs_trumedia_params_HPF) +
- sizeof(struct adm_param_data_v5);
- param_id = SRS_TRUMEDIA_PARAMS_HPF;
- adm_params->params.param_size =
- sizeof(struct srs_trumedia_params_HPF);
- hpf_params = (struct srs_trumedia_params_HPF *)
- ((u8 *)adm_params +
- sizeof(struct adm_cmd_set_pp_params_inband_v5));
- memcpy(hpf_params, srs_params,
- sizeof(struct srs_trumedia_params_HPF));
+ param_hdr.param_id = SRS_TRUMEDIA_PARAMS_HPF;
+ param_hdr.param_size = sizeof(struct srs_trumedia_params_HPF);
break;
}
case SRS_ID_AEQ: {
- int *update_params_ptr = (int *)this_adm.outband_memmap.kvaddr;
- outband = 1;
- adm_params = kzalloc(sizeof(struct adm_cmd_set_pp_params_v5),
- GFP_KERNEL);
- adm_params_ = (struct adm_cmd_set_pp_params_v5 *)adm_params;
- if (!adm_params_) {
- pr_err("%s, adm params memory alloc failed\n",
- __func__);
- return -ENOMEM;
- }
+ u8 *update_params_ptr = (u8 *) this_adm.outband_memmap.kvaddr;
+
+ outband = true;
- sz = sizeof(struct srs_trumedia_params_AEQ);
if (update_params_ptr == NULL) {
pr_err("ADM_SRS_TRUMEDIA - %s: null memmap for AEQ params\n",
__func__);
ret = -EINVAL;
goto fail_cmd;
}
- param_id = SRS_TRUMEDIA_PARAMS_AEQ;
- *update_params_ptr++ = module_id;
- *update_params_ptr++ = param_id;
- *update_params_ptr++ = sz;
- memcpy(update_params_ptr, srs_params, sz);
- adm_params_->payload_size = sz + 12;
+ param_hdr.param_id = SRS_TRUMEDIA_PARAMS_AEQ;
+ param_hdr.param_size = sizeof(struct srs_trumedia_params_AEQ);
+ ret = q6common_pack_pp_params(update_params_ptr, &param_hdr,
+ srs_params, &total_param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack param header and data, error %d\n",
+ __func__, ret);
+ goto fail_cmd;
+ }
break;
}
case SRS_ID_HL: {
- struct srs_trumedia_params_HL *hl_params = NULL;
- sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
- sizeof(struct srs_trumedia_params_HL);
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed\n",
- __func__);
- return -ENOMEM;
- }
- adm_params->payload_size =
- sizeof(struct srs_trumedia_params_HL) +
- sizeof(struct adm_param_data_v5);
- param_id = SRS_TRUMEDIA_PARAMS_HL;
- adm_params->params.param_size =
- sizeof(struct srs_trumedia_params_HL);
- hl_params = (struct srs_trumedia_params_HL *)
- ((u8 *)adm_params +
- sizeof(struct adm_cmd_set_pp_params_inband_v5));
- memcpy(hl_params, srs_params,
- sizeof(struct srs_trumedia_params_HL));
+ param_hdr.param_id = SRS_TRUMEDIA_PARAMS_HL;
+ param_hdr.param_size = sizeof(struct srs_trumedia_params_HL);
break;
}
case SRS_ID_GEQ: {
- struct srs_trumedia_params_GEQ *geq_params = NULL;
- sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
- sizeof(struct srs_trumedia_params_GEQ);
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed\n",
- __func__);
- return -ENOMEM;
- }
- adm_params->payload_size =
- sizeof(struct srs_trumedia_params_GEQ) +
- sizeof(struct adm_param_data_v5);
- param_id = SRS_TRUMEDIA_PARAMS_GEQ;
- adm_params->params.param_size =
- sizeof(struct srs_trumedia_params_GEQ);
- geq_params = (struct srs_trumedia_params_GEQ *)
- ((u8 *)adm_params +
- sizeof(struct adm_cmd_set_pp_params_inband_v5));
- memcpy(geq_params, srs_params,
- sizeof(struct srs_trumedia_params_GEQ));
- pr_debug("SRS - %s: GEQ params prepared\n", __func__);
+ param_hdr.param_id = SRS_TRUMEDIA_PARAMS_GEQ;
+ param_hdr.param_size = sizeof(struct srs_trumedia_params_GEQ);
break;
}
default:
goto fail_cmd;
}
- adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- adm_params->hdr.src_svc = APR_SVC_ADM;
- adm_params->hdr.src_domain = APR_DOMAIN_APPS;
- adm_params->hdr.src_port = port_id;
- adm_params->hdr.dest_svc = APR_SVC_ADM;
- adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
- adm_params->hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_params->hdr.token = port_idx << 16 | copp_idx;
- adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
if (outband && this_adm.outband_memmap.paddr) {
- adm_params->hdr.pkt_size =
- sizeof(struct adm_cmd_set_pp_params_v5);
- adm_params->payload_addr_lsw = lower_32_bits(
- this_adm.outband_memmap.paddr);
- adm_params->payload_addr_msw = msm_audio_populate_upper_32_bits(
- this_adm.outband_memmap.paddr);
- adm_params->mem_map_handle = atomic_read(&this_adm.
- mem_map_handles[ADM_SRS_TRUMEDIA]);
+ mem_hdr.data_payload_addr_lsw =
+ lower_32_bits(this_adm.outband_memmap.paddr);
+ mem_hdr.data_payload_addr_msw =
+ msm_audio_populate_upper_32_bits(
+ this_adm.outband_memmap.paddr);
+ mem_hdr.mem_map_handle = atomic_read(
+ &this_adm.mem_map_handles[ADM_SRS_TRUMEDIA]);
+
+ ret = adm_set_pp_params(port_id, copp_idx, &mem_hdr, NULL,
+ total_param_size);
} else {
- adm_params->hdr.pkt_size = sz;
- adm_params->payload_addr_lsw = 0;
- adm_params->payload_addr_msw = 0;
- adm_params->mem_map_handle = 0;
-
- adm_params->params.module_id = module_id;
- adm_params->params.param_id = param_id;
- adm_params->params.reserved = 0;
+ ret = adm_pack_and_set_one_pp_param(port_id, copp_idx,
+ param_hdr,
+ (u8 *) srs_params);
}
- pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
- __func__, adm_params->hdr.dest_port,
- adm_params->payload_size, module_id, param_id);
-
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
- if (ret < 0) {
+ if (ret < 0)
pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
port_id);
- ret = -EINVAL;
- goto fail_cmd;
- }
- /* Wait for the callback with copp id */
- ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: SRS set params timed out port = %d\n",
- __func__, port_id);
- ret = -EINVAL;
- goto fail_cmd;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto fail_cmd;
- }
fail_cmd:
- kfree(adm_params);
return ret;
}
@@ -570,7 +419,7 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
int channel_index)
{
struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
- struct adm_param_data_v5 data_v5;
+ struct param_hdr_v3 data_v5;
int ret = 0, port_idx, sz = 0, param_size = 0;
u16 *adm_pspd_params;
u16 *ptr;
@@ -602,8 +451,8 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
roundup(param_size, 4);
sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) +
- sizeof(struct default_chmixer_param_id_coeff) +
- sizeof(struct adm_param_data_v5) + param_size;
+ sizeof(struct default_chmixer_param_id_coeff) +
+ sizeof(struct param_hdr_v3) + param_size;
pr_debug("%s: sz = %d\n", __func__, sz);
adm_params = kzalloc(sz, GFP_KERNEL);
if (!adm_params)
@@ -626,8 +475,8 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
data_v5.reserved = 0;
data_v5.param_size = param_size;
adm_params->payload_size =
- sizeof(struct default_chmixer_param_id_coeff) +
- sizeof(struct adm_param_data_v5) + data_v5.param_size;
+ sizeof(struct default_chmixer_param_id_coeff) +
+ sizeof(struct param_hdr_v3) + data_v5.param_size;
adm_pspd_params = (u16 *)((u8 *)adm_params +
sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5));
memcpy(adm_pspd_params, &data_v5, sizeof(data_v5));
@@ -861,286 +710,267 @@ set_stereo_to_custom_stereo_return:
return rc;
}
-int adm_dolby_dap_send_params(int port_id, int copp_idx, char *params,
- uint32_t params_length)
+/*
+ * With pre-packed data, only the opcode differs between V5 and V6.
+ * Use q6common_pack_pp_params to pack the data correctly.
+ */
+int adm_set_pp_params(int port_id, int copp_idx,
+ struct mem_mapping_hdr *mem_hdr, u8 *param_data,
+ u32 param_size)
{
- struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
- int sz, rc = 0;
- int port_idx;
+ struct adm_cmd_set_pp_params *adm_set_params = NULL;
+ int size = sizeof(struct adm_cmd_set_pp_params);
+ int port_idx = 0;
+ atomic_t *copp_stat = NULL;
+ int ret = 0;
- pr_debug("%s:\n", __func__);
port_id = afe_convert_virtual_to_portid(port_id);
port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ pr_err("%s: Invalid port_idx 0x%x\n", __func__, port_idx);
+ return -EINVAL;
+ } else if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+ pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
return -EINVAL;
}
- sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed", __func__);
+ /* Only add params_size in inband case */
+ if (param_data != NULL)
+ size += param_size;
+ adm_set_params = kzalloc(size, GFP_KERNEL);
+ if (!adm_set_params)
return -ENOMEM;
- }
- memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
- params, params_length);
- adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- adm_params->hdr.pkt_size = sz;
- adm_params->hdr.src_svc = APR_SVC_ADM;
- adm_params->hdr.src_domain = APR_DOMAIN_APPS;
- adm_params->hdr.src_port = port_id;
- adm_params->hdr.dest_svc = APR_SVC_ADM;
- adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
- adm_params->hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_params->hdr.token = port_idx << 16 | copp_idx;
- adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- adm_params->payload_addr_lsw = 0;
- adm_params->payload_addr_msw = 0;
- adm_params->mem_map_handle = 0;
- adm_params->payload_size = params_length;
+ adm_set_params->apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ adm_set_params->apr_hdr.pkt_size = size;
+ adm_set_params->apr_hdr.src_svc = APR_SVC_ADM;
+ adm_set_params->apr_hdr.src_domain = APR_DOMAIN_APPS;
+ adm_set_params->apr_hdr.src_port = port_id;
+ adm_set_params->apr_hdr.dest_svc = APR_SVC_ADM;
+ adm_set_params->apr_hdr.dest_domain = APR_DOMAIN_ADSP;
+ adm_set_params->apr_hdr.dest_port =
+ atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+ adm_set_params->apr_hdr.token = port_idx << 16 | copp_idx;
+
+ if (q6common_is_instance_id_supported())
+ adm_set_params->apr_hdr.opcode = ADM_CMD_SET_PP_PARAMS_V6;
+ else
+ adm_set_params->apr_hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+
+ adm_set_params->payload_size = param_size;
+
+ if (mem_hdr != NULL) {
+ /* Out of Band Case */
+ adm_set_params->mem_hdr = *mem_hdr;
+ } else if (param_data != NULL) {
+ /* In band case. Parameter data must be pre-packed with its
+ * header before calling this function. Use
+ * q6common_pack_pp_params to pack parameter data and header
+ * correctly.
+ */
+ memcpy(&adm_set_params->param_data, param_data, param_size);
+ } else {
+ pr_err("%s: Received NULL pointers for both memory header and param data\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
- if (rc < 0) {
- pr_err("%s: Set params failed port = 0x%x rc %d\n",
- __func__, port_id, rc);
- rc = -EINVAL;
- goto dolby_dap_send_param_return;
+ copp_stat = &this_adm.copp.stat[port_idx][copp_idx];
+ atomic_set(copp_stat, -1);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *) adm_set_params);
+ if (ret < 0) {
+ pr_err("%s: Set params APR send failed port = 0x%x ret %d\n",
+ __func__, port_id, ret);
+ goto done;
}
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: Set params timed out port = 0x%x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto dolby_dap_send_param_return;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto dolby_dap_send_param_return;
+ ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+ atomic_read(copp_stat) >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: Set params timed out port = 0x%x\n", __func__,
+ port_id);
+ ret = -ETIMEDOUT;
+ goto done;
}
- rc = 0;
-dolby_dap_send_param_return:
- kfree(adm_params);
- return rc;
+ if (atomic_read(copp_stat) > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(atomic_read(copp_stat)));
+ ret = adsp_err_get_lnx_err_code(atomic_read(copp_stat));
+ goto done;
+ }
+
+ ret = 0;
+done:
+ kfree(adm_set_params);
+ return ret;
}
+EXPORT_SYMBOL(adm_set_pp_params);
-int adm_send_params_v5(int port_id, int copp_idx, char *params,
- uint32_t params_length)
+int adm_pack_and_set_one_pp_param(int port_id, int copp_idx,
+ struct param_hdr_v3 param_hdr, u8 *param_data)
{
- struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
- int rc = 0;
- int sz, port_idx;
-
- pr_debug("%s:\n", __func__);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
- return -EINVAL;
- }
+ u8 *packed_data = NULL;
+ u32 total_size = 0;
+ int ret = 0;
- sz = sizeof(struct adm_cmd_set_pp_params_v5) + params_length;
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed", __func__);
+ total_size = sizeof(union param_hdrs) + param_hdr.param_size;
+ packed_data = kzalloc(total_size, GFP_KERNEL);
+ if (!packed_data)
return -ENOMEM;
- }
- memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
- params, params_length);
- adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- adm_params->hdr.pkt_size = sz;
- adm_params->hdr.src_svc = APR_SVC_ADM;
- adm_params->hdr.src_domain = APR_DOMAIN_APPS;
- adm_params->hdr.src_port = port_id;
- adm_params->hdr.dest_svc = APR_SVC_ADM;
- adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
- adm_params->hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_params->hdr.token = port_idx << 16 | copp_idx;
- adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- adm_params->payload_addr_lsw = 0;
- adm_params->payload_addr_msw = 0;
- adm_params->mem_map_handle = 0;
- adm_params->payload_size = params_length;
-
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
- if (rc < 0) {
- pr_err("%s: Set params failed port = 0x%x rc %d\n",
- __func__, port_id, rc);
- rc = -EINVAL;
- goto send_param_return;
- }
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: Set params timed out port = 0x%x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto send_param_return;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto send_param_return;
+ ret = q6common_pack_pp_params(packed_data, &param_hdr, param_data,
+ &total_size);
+ if (ret) {
+ pr_err("%s: Failed to pack parameter data, error %d\n",
+ __func__, ret);
+ goto done;
}
- rc = 0;
-send_param_return:
- kfree(adm_params);
- return rc;
+
+ ret = adm_set_pp_params(port_id, copp_idx, NULL, packed_data,
+ total_size);
+ if (ret)
+ pr_err("%s: Failed to set parameter data, error %d\n", __func__,
+ ret);
+done:
+ kfree(packed_data);
+ return ret;
}
+EXPORT_SYMBOL(adm_pack_and_set_one_pp_param);
-int adm_get_params_v2(int port_id, int copp_idx, uint32_t module_id,
- uint32_t param_id, uint32_t params_length,
- char *params, uint32_t client_id)
+/*
+ * Only one parameter can be requested at a time. Therefore, packing and sending
+ * the request can be handled locally.
+ */
+int adm_get_pp_params(int port_id, int copp_idx, uint32_t client_id,
+ struct mem_mapping_hdr *mem_hdr,
+ struct param_hdr_v3 *param_hdr, u8 *returned_param_data)
{
- struct adm_cmd_get_pp_params_v5 *adm_params = NULL;
- int rc = 0, i = 0;
- int port_idx, idx;
- int *params_data = (int *)params;
- uint64_t sz = 0;
+ struct adm_cmd_get_pp_params adm_get_params;
+ int total_size = 0;
+ int get_param_array_sz = ARRAY_SIZE(adm_get_parameters);
+ int returned_param_size = 0;
+ int returned_param_size_in_bytes = 0;
+ int port_idx = 0;
+ int idx = 0;
+ atomic_t *copp_stat = NULL;
+ int ret = 0;
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+ if (param_hdr == NULL) {
+ pr_err("%s: Received NULL pointer for parameter header\n",
+ __func__);
return -EINVAL;
}
- sz = (uint64_t)sizeof(struct adm_cmd_get_pp_params_v5) +
- (uint64_t)params_length;
- /*
- * Check if the value of "sz" (which is ultimately assigned to
- * "hdr.pkt_size") crosses U16_MAX.
- */
- if (sz > U16_MAX) {
- pr_err("%s: Invalid params_length\n", __func__);
+ port_id = afe_convert_virtual_to_portid(port_id);
+ port_idx = adm_validate_and_get_port_index(port_id);
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ pr_err("%s: Invalid port_idx 0x%x\n", __func__, port_idx);
return -EINVAL;
}
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s: adm params memory alloc failed", __func__);
- return -ENOMEM;
+ if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+ pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
+ return -EINVAL;
}
- memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
- params, params_length);
- adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- adm_params->hdr.pkt_size = sz;
- adm_params->hdr.src_svc = APR_SVC_ADM;
- adm_params->hdr.src_domain = APR_DOMAIN_APPS;
- adm_params->hdr.src_port = port_id;
- adm_params->hdr.dest_svc = APR_SVC_ADM;
- adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
- adm_params->hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_params->hdr.token = port_idx << 16 | client_id << 8 | copp_idx;
- adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
- adm_params->data_payload_addr_lsw = 0;
- adm_params->data_payload_addr_msw = 0;
- adm_params->mem_map_handle = 0;
- adm_params->module_id = module_id;
- adm_params->param_id = param_id;
- adm_params->param_max_size = params_length;
- adm_params->reserved = 0;
+ memset(&adm_get_params, 0, sizeof(adm_get_params));
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
- if (rc < 0) {
- pr_err("%s: Failed to Get Params on port_id 0x%x %d\n",
- __func__, port_id, rc);
- rc = -EINVAL;
- goto adm_get_param_return;
+ if (mem_hdr != NULL)
+ adm_get_params.mem_hdr = *mem_hdr;
+
+ q6common_pack_pp_params((u8 *) &adm_get_params.param_hdr, param_hdr,
+ NULL, &total_size);
+
+ /* Pack APR header after filling body so total_size has correct value */
+ adm_get_params.apr_hdr.pkt_size = total_size;
+ adm_get_params.apr_hdr.src_svc = APR_SVC_ADM;
+ adm_get_params.apr_hdr.src_domain = APR_DOMAIN_APPS;
+ adm_get_params.apr_hdr.src_port = port_id;
+ adm_get_params.apr_hdr.dest_svc = APR_SVC_ADM;
+ adm_get_params.apr_hdr.dest_domain = APR_DOMAIN_ADSP;
+ adm_get_params.apr_hdr.dest_port =
+ atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+ adm_get_params.apr_hdr.token =
+ port_idx << 16 | client_id << 8 | copp_idx;
+
+ if (q6common_is_instance_id_supported())
+ adm_get_params.apr_hdr.opcode = ADM_CMD_GET_PP_PARAMS_V6;
+ else
+ adm_get_params.apr_hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
+
+ copp_stat = &this_adm.copp.stat[port_idx][copp_idx];
+ atomic_set(copp_stat, -1);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *) &adm_get_params);
+ if (ret) {
+ pr_err("%s: Get params APR send failed port = 0x%x ret %d\n",
+ __func__, port_id, ret);
+ ret = -EINVAL;
+ goto done;
}
- /* Wait for the callback with copp id */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: get params timed out port_id = 0x%x\n", __func__,
- port_id);
- rc = -EINVAL;
- goto adm_get_param_return;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto adm_get_param_return;
+ ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+ atomic_read(copp_stat) >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: Get params timed out port = 0x%x\n", __func__,
+ port_id);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+ if (atomic_read(copp_stat) > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(atomic_read(copp_stat)));
+ ret = adsp_err_get_lnx_err_code(atomic_read(copp_stat));
+ goto done;
}
- idx = ADM_GET_PARAMETER_LENGTH * copp_idx;
- if (adm_get_parameters[idx] < 0) {
- pr_err("%s: Size is invalid %d\n", __func__,
- adm_get_parameters[idx]);
- rc = -EINVAL;
- goto adm_get_param_return;
- }
- if ((params_data) &&
- (ARRAY_SIZE(adm_get_parameters) >
- idx) &&
- (ARRAY_SIZE(adm_get_parameters) >=
- 1+adm_get_parameters[idx]+idx) &&
- (params_length/sizeof(uint32_t) >=
- adm_get_parameters[idx])) {
- for (i = 0; i < adm_get_parameters[idx]; i++)
- params_data[i] = adm_get_parameters[1+i+idx];
+ ret = 0;
- } else {
- pr_err("%s: Get param data not copied! get_param array size %zd, index %d, params array size %zd, index %d\n",
- __func__, ARRAY_SIZE(adm_get_parameters),
- (1+adm_get_parameters[idx]+idx),
- params_length/sizeof(int),
- adm_get_parameters[idx]);
+ /* Copy data to caller if sent in band */
+ if (!returned_param_data) {
+ pr_debug("%s: Received NULL pointer for param destination, not copying payload\n",
+ __func__);
+ return 0;
}
- rc = 0;
-adm_get_param_return:
- kfree(adm_params);
- return rc;
-}
+ idx = ADM_GET_PARAMETER_LENGTH * copp_idx;
+ returned_param_size = adm_get_parameters[idx];
+ if (returned_param_size < 0 ||
+ returned_param_size + idx + 1 > get_param_array_sz) {
+ pr_err("%s: Invalid parameter size %d\n", __func__,
+ returned_param_size);
+ return -EINVAL;
+ }
-int adm_get_params(int port_id, int copp_idx, uint32_t module_id,
- uint32_t param_id, uint32_t params_length, char *params)
-{
- return adm_get_params_v2(port_id, copp_idx, module_id, param_id,
- params_length, params, 0);
+ returned_param_size_in_bytes = returned_param_size * sizeof(uint32_t);
+ if (param_hdr->param_size < returned_param_size_in_bytes) {
+ pr_err("%s: Provided buffer is not big enough, provided buffer size(%d) size needed(%d)\n",
+ __func__, param_hdr->param_size,
+ returned_param_size_in_bytes);
+ return -EINVAL;
+ }
+
+ memcpy(returned_param_data, &adm_get_parameters[idx + 1],
+ returned_param_size_in_bytes);
+done:
+ return ret;
}
+EXPORT_SYMBOL(adm_get_pp_params);
-int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length,
- char *params)
+int adm_get_pp_topo_module_list_v2(int port_id, int copp_idx,
+ int32_t param_length,
+ int32_t *returned_params)
{
- struct adm_cmd_get_pp_topo_module_list_t *adm_pp_module_list = NULL;
- int sz, rc = 0, i = 0;
- int port_idx, idx;
- int32_t *params_data = (int32_t *)params;
+ struct adm_cmd_get_pp_topo_module_list adm_get_module_list;
+ bool iid_supported = q6common_is_instance_id_supported();
int *topo_list;
+ int num_modules = 0;
+ int list_size = 0;
+ int port_idx, idx;
+ int i = 0;
+ atomic_t *copp_stat = NULL;
+ int ret = 0;
pr_debug("%s : port_id %x", __func__, port_id);
port_id = afe_convert_virtual_to_portid(port_id);
@@ -1149,86 +979,102 @@ int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length,
pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
return -EINVAL;
}
-
if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
return -EINVAL;
}
- sz = sizeof(struct adm_cmd_get_pp_topo_module_list_t) + param_length;
- adm_pp_module_list = kzalloc(sz, GFP_KERNEL);
- if (!adm_pp_module_list) {
- pr_err("%s, adm params memory alloc failed", __func__);
- return -ENOMEM;
- }
+ memset(&adm_get_module_list, 0, sizeof(adm_get_module_list));
- memcpy(((u8 *)adm_pp_module_list +
- sizeof(struct adm_cmd_get_pp_topo_module_list_t)),
- params, param_length);
- adm_pp_module_list->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- adm_pp_module_list->hdr.pkt_size = sz;
- adm_pp_module_list->hdr.src_svc = APR_SVC_ADM;
- adm_pp_module_list->hdr.src_domain = APR_DOMAIN_APPS;
- adm_pp_module_list->hdr.src_port = port_id;
- adm_pp_module_list->hdr.dest_svc = APR_SVC_ADM;
- adm_pp_module_list->hdr.dest_domain = APR_DOMAIN_ADSP;
- adm_pp_module_list->hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_pp_module_list->hdr.token = port_idx << 16 | copp_idx;
- adm_pp_module_list->hdr.opcode = ADM_CMD_GET_PP_TOPO_MODULE_LIST;
- adm_pp_module_list->param_max_size = param_length;
- /* Payload address and mmap handle set to zero by kzalloc */
+ adm_get_module_list.apr_hdr.pkt_size = sizeof(adm_get_module_list);
+ adm_get_module_list.apr_hdr.src_svc = APR_SVC_ADM;
+ adm_get_module_list.apr_hdr.src_domain = APR_DOMAIN_APPS;
+ adm_get_module_list.apr_hdr.src_port = port_id;
+ adm_get_module_list.apr_hdr.dest_svc = APR_SVC_ADM;
+ adm_get_module_list.apr_hdr.dest_domain = APR_DOMAIN_ADSP;
+ adm_get_module_list.apr_hdr.dest_port =
+ atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+ adm_get_module_list.apr_hdr.token = port_idx << 16 | copp_idx;
+ /*
+ * Out of band functionality is not currently utilized.
+ * Assume in band.
+ */
+ if (iid_supported) {
+ adm_get_module_list.apr_hdr.opcode =
+ ADM_CMD_GET_PP_TOPO_MODULE_LIST_V2;
+ adm_get_module_list.param_max_size = param_length;
+ } else {
+ adm_get_module_list.apr_hdr.opcode =
+ ADM_CMD_GET_PP_TOPO_MODULE_LIST;
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+ if (param_length > U16_MAX) {
+ pr_err("%s: Invalid param length for V1 %d\n", __func__,
+ param_length);
+ return -EINVAL;
+ }
+ adm_get_module_list.param_max_size = param_length << 16;
+ }
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_pp_module_list);
- if (rc < 0) {
- pr_err("%s: Failed to Get Params on port %d\n", __func__,
- port_id);
- rc = -EINVAL;
- goto adm_pp_module_list_l;
+ copp_stat = &this_adm.copp.stat[port_idx][copp_idx];
+ atomic_set(copp_stat, -1);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *) &adm_get_module_list);
+ if (ret) {
+ pr_err("%s: APR send pkt failed for port_id: 0x%x failed ret %d\n",
+ __func__, port_id, ret);
+ ret = -EINVAL;
+ goto done;
}
- /* Wait for the callback with copp id */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: get params timed out port = %d\n", __func__,
- port_id);
- rc = -EINVAL;
- goto adm_pp_module_list_l;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto adm_pp_module_list_l;
- }
- if (params_data) {
- idx = ADM_GET_TOPO_MODULE_LIST_LENGTH * copp_idx;
- topo_list = (int *)(adm_module_topo_list + idx);
- if (param_length <= ADM_GET_TOPO_MODULE_LIST_LENGTH &&
- idx <
- (MAX_COPPS_PER_PORT * ADM_GET_TOPO_MODULE_LIST_LENGTH))
- memcpy(params_data, topo_list, param_length);
- else
- pr_debug("%s: i/p size:%d > MAX param size:%d\n",
- __func__, param_length,
- (int)ADM_GET_TOPO_MODULE_LIST_LENGTH);
- for (i = 1; i <= params_data[0]; i++)
- pr_debug("module = 0x%x\n", params_data[i]);
+ ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+ atomic_read(copp_stat) >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: Timeout for port_id: 0x%x\n", __func__, port_id);
+ ret = -ETIMEDOUT;
+ goto done;
}
- rc = 0;
-adm_pp_module_list_l:
- kfree(adm_pp_module_list);
- pr_debug("%s : rc = %d ", __func__, rc);
- return rc;
+ if (atomic_read(copp_stat) > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(atomic_read(copp_stat)));
+ ret = adsp_err_get_lnx_err_code(atomic_read(copp_stat));
+ goto done;
+ }
+
+ ret = 0;
+
+ if (returned_params) {
+ /*
+ * When processing ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST IID is
+ * added since it is not present. Therefore, there is no need to
+ * do anything different if IID is not supported here as it is
+ * already taken care of.
+ */
+ idx = ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH * copp_idx;
+ num_modules = adm_module_topo_list[idx];
+ if (num_modules < 0 || num_modules > MAX_MODULES_IN_TOPO) {
+ pr_err("%s: Invalid number of modules returned %d\n",
+ __func__, num_modules);
+ return -EINVAL;
+ }
+
+ list_size = num_modules * sizeof(struct module_instance_info);
+ if (param_length < list_size) {
+ pr_err("%s: Provided buffer not big enough to hold module-instance list, provided size %d, needed size %d\n",
+ __func__, param_length, list_size);
+ return -EINVAL;
+ }
+
+ topo_list = (int32_t *) (&adm_module_topo_list[idx]);
+ memcpy(returned_params, topo_list, list_size);
+ for (i = 1; i <= num_modules; i += 2) {
+ pr_debug("module = 0x%x instance = 0x%x\n",
+ returned_params[i], returned_params[i + 1]);
+ }
+ }
+done:
+ return ret;
}
+EXPORT_SYMBOL(adm_get_pp_topo_module_list_v2);
+
static void adm_callback_debug_print(struct apr_client_data *data)
{
uint32_t *payload;
@@ -1288,13 +1134,122 @@ int adm_get_multi_ch_map(char *channel_map, int path)
return 0;
}
+/*
+ * Copy the parameter data from an in-band ADM_CMDRSP_GET_PP_PARAMS_V5/V6
+ * response payload into the adm_get_parameters cache at slot @idx.
+ * Slot layout: adm_get_parameters[idx] = length in ints, followed by data.
+ * Returns 0 on success, -EINVAL on a malformed payload or bad opcode.
+ */
+static int adm_process_get_param_response(u32 opcode, u32 idx, u32 *payload,
+					  u32 payload_size)
+{
+	struct adm_cmd_rsp_get_pp_params_v5 *v5_rsp = NULL;
+	struct adm_cmd_rsp_get_pp_params_v6 *v6_rsp = NULL;
+	u32 *param_data = NULL;
+	int data_size;
+	int struct_size;
+
+	if (payload == NULL) {
+		pr_err("%s: Payload is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (opcode) {
+	case ADM_CMDRSP_GET_PP_PARAMS_V5:
+		struct_size = sizeof(struct adm_cmd_rsp_get_pp_params_v5);
+		v5_rsp = (struct adm_cmd_rsp_get_pp_params_v5 *) payload;
+		data_size = v5_rsp->param_hdr.param_size;
+		param_data = v5_rsp->param_data;
+		break;
+	case ADM_CMDRSP_GET_PP_PARAMS_V6:
+		struct_size = sizeof(struct adm_cmd_rsp_get_pp_params_v6);
+		v6_rsp = (struct adm_cmd_rsp_get_pp_params_v6 *) payload;
+		data_size = v6_rsp->param_hdr.param_size;
+		param_data = v6_rsp->param_data;
+		break;
+	default:
+		pr_err("%s: Invalid opcode %d\n", __func__, opcode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Just store the returned parameter data, not the header. The calling
+	 * function is expected to know what it asked for. Therefore, there is
+	 * no difference between V5 and V6.
+	 *
+	 * NOTE: the third condition compares data_size (bytes) against a slot
+	 * count (ints), which over-reserves by sizeof(int) - it is stricter
+	 * than necessary but never allows an overflow of the cache.
+	 */
+	if ((payload_size >= struct_size + data_size) &&
+	    (ARRAY_SIZE(adm_get_parameters) > idx) &&
+	    (ARRAY_SIZE(adm_get_parameters) >= idx + 1 + data_size)) {
+		/*
+		 * data_size is expressed in number of bytes, store in number of
+		 * ints
+		 */
+		adm_get_parameters[idx] =
+			data_size / sizeof(*adm_get_parameters);
+		pr_debug("%s: GET_PP PARAM: received parameter length: 0x%x\n",
+			 __func__, adm_get_parameters[idx]);
+		/* store params after param_size */
+		memcpy(&adm_get_parameters[idx + 1], param_data, data_size);
+		return 0;
+	}
+
+	/* Fixed typo in the error string: "Invlaid" -> "Invalid" */
+	pr_err("%s: Invalid parameter combination, payload_size %d, idx %d\n",
+	       __func__, payload_size, idx);
+	return -EINVAL;
+}
+
+/*
+ * Repack the module/instance list from an ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST
+ * (V1 or V2) response into adm_module_topo_list for @copp_idx. For V1
+ * responses (module IDs only) INSTANCE_ID_0 is inserted so the cached list
+ * always holds <module, instance> pairs. Caller is expected to have bounded
+ * num_modules (<= MAX_MODULES_IN_TOPO) so the fill stays within the slot.
+ * Returns 0 on success, -EINVAL on bad input or a truncated payload.
+ */
+static int adm_process_get_topo_list_response(u32 opcode, int copp_idx,
+					      u32 num_modules, u32 *payload,
+					      u32 payload_size)
+{
+	u32 *fill_list = NULL;
+	int idx = 0;
+	int i = 0;
+	int j = 0;
+
+	if (payload == NULL) {
+		pr_err("%s: Payload is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * BUGFIX: this branch was previously unbraced, so the return -EINVAL
+	 * below it executed unconditionally and the function always failed.
+	 */
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid COPP index %d\n", __func__, copp_idx);
+		return -EINVAL;
+	}
+
+	idx = ADM_GET_TOPO_MODULE_INSTANCE_LIST_LENGTH * copp_idx;
+	fill_list = adm_module_topo_list + idx;
+	*fill_list++ = num_modules;
+	for (i = 0; i < num_modules; i++) {
+		/* >= : payload[j] is only valid for j < payload_size/4 */
+		if (j >= payload_size / sizeof(u32)) {
+			pr_err("%s: Invalid number of modules specified %d\n",
+			       __func__, num_modules);
+			return -EINVAL;
+		}
+
+		/* store module ID */
+		*fill_list++ = payload[j];
+		j++;
+
+		switch (opcode) {
+		case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST_V2:
+			/* store instance ID; V2 needs a second payload word */
+			if (j >= payload_size / sizeof(u32)) {
+				pr_err("%s: Invalid number of modules specified %d\n",
+				       __func__, num_modules);
+				return -EINVAL;
+			}
+			*fill_list++ = payload[j];
+			j++;
+			break;
+		case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST:
+			/* Insert IID 0 when repacking */
+			*fill_list++ = INSTANCE_ID_0;
+			break;
+		default:
+			pr_err("%s: Invalid opcode %d\n", __func__, opcode);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
static int32_t adm_callback(struct apr_client_data *data, void *priv)
{
uint32_t *payload;
int i, j, port_idx, copp_idx, idx, client_id;
+ int num_modules;
+ int ret;
if (data == NULL) {
- pr_err("%s: data paramter is null\n", __func__);
+ pr_err("%s: data parameter is null\n", __func__);
return -EINVAL;
}
@@ -1312,7 +1267,8 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
RESET_COPP_ID);
atomic_set(&this_adm.copp.cnt[i][j], 0);
atomic_set(
- &this_adm.copp.topology[i][j], 0);
+ &this_adm.copp.topology[i][j],
+ 0);
atomic_set(&this_adm.copp.mode[i][j],
0);
atomic_set(&this_adm.copp.stat[i][j],
@@ -1320,8 +1276,8 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
atomic_set(&this_adm.copp.rate[i][j],
0);
atomic_set(
- &this_adm.copp.channels[i][j],
- 0);
+ &this_adm.copp.channels[i][j],
+ 0);
atomic_set(
&this_adm.copp.bit_width[i][j], 0);
atomic_set(
@@ -1392,8 +1348,9 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
}
switch (payload[0]) {
case ADM_CMD_SET_PP_PARAMS_V5:
- pr_debug("%s: ADM_CMD_SET_PP_PARAMS_V5\n",
- __func__);
+ case ADM_CMD_SET_PP_PARAMS_V6:
+ pr_debug("%s: ADM_CMD_SET_PP_PARAMS\n",
+ __func__);
if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING)
this_adm.sourceTrackingData.
apr_cmd_status = payload[1];
@@ -1450,8 +1407,9 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
}
break;
case ADM_CMD_GET_PP_PARAMS_V5:
- pr_debug("%s: ADM_CMD_GET_PP_PARAMS_V5\n",
- __func__);
+ case ADM_CMD_GET_PP_PARAMS_V6:
+ pr_debug("%s: ADM_CMD_GET_PP_PARAMS\n",
+ __func__);
/* Should only come here if there is an APR */
/* error or malformed APR packet. Otherwise */
/* response will be returned as */
@@ -1488,11 +1446,12 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
&this_adm.copp.wait[port_idx][copp_idx]);
break;
case ADM_CMD_GET_PP_TOPO_MODULE_LIST:
+ case ADM_CMD_GET_PP_TOPO_MODULE_LIST_V2:
pr_debug("%s:ADM_CMD_GET_PP_TOPO_MODULE_LIST\n",
__func__);
if (payload[1] != 0)
- pr_err("%s: ADM get topo list error = %d,\n",
- __func__, payload[1]);
+ pr_err("%s: ADM get topo list error = %d\n",
+ __func__, payload[1]);
break;
default:
pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
@@ -1527,80 +1486,60 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
}
break;
case ADM_CMDRSP_GET_PP_PARAMS_V5:
- pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS_V5\n", __func__);
- if (payload[0] != 0)
- pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS_V5 returned error = 0x%x\n",
- __func__, payload[0]);
+ case ADM_CMDRSP_GET_PP_PARAMS_V6:
+ pr_debug("%s: ADM_CMDRSP_GET_PP_PARAMS\n", __func__);
if (client_id == ADM_CLIENT_ID_SOURCE_TRACKING)
this_adm.sourceTrackingData.apr_cmd_status =
- payload[0];
+ payload[0];
else if (rtac_make_adm_callback(payload,
- data->payload_size))
+ data->payload_size))
break;
idx = ADM_GET_PARAMETER_LENGTH * copp_idx;
- if ((payload[0] == 0) && (data->payload_size >
- (4 * sizeof(*payload))) &&
- (data->payload_size - 4 >=
- payload[3]) &&
- (ARRAY_SIZE(adm_get_parameters) >
- idx) &&
- (ARRAY_SIZE(adm_get_parameters)-idx-1 >=
- payload[3])) {
- adm_get_parameters[idx] = payload[3] /
- sizeof(uint32_t);
- /*
- * payload[3] is param_size which is
- * expressed in number of bytes
- */
- pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n",
- __func__, adm_get_parameters[idx]);
- /* storing param size then params */
- for (i = 0; i < payload[3] /
- sizeof(uint32_t); i++)
- adm_get_parameters[idx+1+i] =
- payload[4+i];
- } else if (payload[0] == 0) {
+ if (payload[0] == 0 && data->payload_size > 0) {
+ pr_debug("%s: Received parameter data in band\n",
+ __func__);
+ ret = adm_process_get_param_response(
+ data->opcode, idx, payload,
+ data->payload_size);
+ if (ret)
+ pr_err("%s: Failed to process get param response, error %d\n",
+ __func__, ret);
+ } else if (payload[0] == 0 && data->payload_size == 0) {
adm_get_parameters[idx] = -1;
- pr_err("%s: Out of band case, setting size to %d\n",
+ pr_debug("%s: Out of band case, setting size to %d\n",
__func__, adm_get_parameters[idx]);
} else {
adm_get_parameters[idx] = -1;
- pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n",
- __func__, adm_get_parameters[idx]);
+ pr_err("%s: ADM_CMDRSP_GET_PP_PARAMS returned error 0x%x\n",
+ __func__, payload[0]);
}
- atomic_set(&this_adm.copp.stat
- [port_idx][copp_idx], payload[0]);
+ atomic_set(&this_adm.copp.stat[port_idx][copp_idx],
+ payload[0]);
wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
break;
case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST:
+ case ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST_V2:
pr_debug("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST\n",
__func__);
- if (payload[0] != 0) {
- pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST",
- __func__);
- pr_err(":err = 0x%x\n", payload[0]);
- } else if (payload[1] >
- ((ADM_GET_TOPO_MODULE_LIST_LENGTH /
- sizeof(uint32_t)) - 1)) {
- pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST",
- __func__);
- pr_err(":size = %d\n", payload[1]);
+ num_modules = payload[1];
+ pr_debug("%s: Num modules %d\n", __func__, num_modules);
+ if (payload[0]) {
+ pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST, error = %d\n",
+ __func__, payload[0]);
+ } else if (num_modules > MAX_MODULES_IN_TOPO) {
+ pr_err("%s: ADM_CMDRSP_GET_PP_TOPO_MODULE_LIST invalid num modules received, num modules = %d\n",
+ __func__, num_modules);
} else {
- idx = ADM_GET_TOPO_MODULE_LIST_LENGTH *
- copp_idx;
- pr_debug("%s:Num modules payload[1] %d\n",
- __func__, payload[1]);
- adm_module_topo_list[idx] = payload[1];
- for (i = 1; i <= payload[1]; i++) {
- adm_module_topo_list[idx+i] =
- payload[1+i];
- pr_debug("%s:payload[%d] = %x\n",
- __func__, (i+1), payload[1+i]);
- }
+ ret = adm_process_get_topo_list_response(
+ data->opcode, copp_idx, num_modules,
+ payload, data->payload_size);
+ if (ret)
+ pr_err("%s: Failed to process get topo modules list response, error %d\n",
+ __func__, ret);
}
- atomic_set(&this_adm.copp.stat
- [port_idx][copp_idx], payload[0]);
+ atomic_set(&this_adm.copp.stat[port_idx][copp_idx],
+ payload[0]);
wake_up(&this_adm.copp.wait[port_idx][copp_idx]);
break;
case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
@@ -1882,21 +1821,16 @@ done:
}
static int send_adm_cal_block(int port_id, int copp_idx,
- struct cal_block_data *cal_block, int perf_mode,
- int app_type, int acdb_id, int sample_rate)
+ struct cal_block_data *cal_block, int perf_mode)
{
- s32 result = 0;
- struct adm_cmd_set_pp_params_v5 adm_params;
- int port_idx;
+ struct mem_mapping_hdr mem_hdr = {0};
+ int payload_size = 0;
+ int port_idx = 0;
+ int topology;
+ int result = 0;
+
+ pr_debug("%s: Port id 0x%x,\n", __func__, port_id);
- pr_debug("%s: Port id 0x%x sample_rate %d ,\n", __func__,
- port_id, sample_rate);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
- return -EINVAL;
- }
if (!cal_block) {
pr_debug("%s: No ADM cal to send for port_id = 0x%x!\n",
__func__, port_id);
@@ -1904,75 +1838,38 @@ static int send_adm_cal_block(int port_id, int copp_idx,
goto done;
}
if (cal_block->cal_data.size <= 0) {
- pr_debug("%s: No ADM cal send for port_id = 0x%x!\n",
- __func__, port_id);
+ pr_debug("%s: No ADM cal sent for port_id = 0x%x!\n", __func__,
+ port_id);
result = -EINVAL;
goto done;
}
+ port_id = afe_convert_virtual_to_portid(port_id);
+ port_idx = adm_validate_and_get_port_index(port_id);
+ if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+ pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+ return -EINVAL;
+ } else if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+ pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
+ return -EINVAL;
+ }
+
+ topology = atomic_read(&this_adm.copp.topology[port_idx][copp_idx]);
if (perf_mode == LEGACY_PCM_MODE &&
- ((atomic_read(&this_adm.copp.topology[port_idx][copp_idx])) ==
- DS2_ADM_COPP_TOPOLOGY_ID)) {
+ topology == DS2_ADM_COPP_TOPOLOGY_ID) {
pr_err("%s: perf_mode %d, topology 0x%x\n", __func__, perf_mode,
- atomic_read(
- &this_adm.copp.topology[port_idx][copp_idx]));
+ topology);
goto done;
}
- adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(20), APR_PKT_VER);
- adm_params.hdr.pkt_size = sizeof(adm_params);
- adm_params.hdr.src_svc = APR_SVC_ADM;
- adm_params.hdr.src_domain = APR_DOMAIN_APPS;
- adm_params.hdr.src_port = port_id;
- adm_params.hdr.dest_svc = APR_SVC_ADM;
- adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
-
- adm_params.hdr.token = port_idx << 16 | copp_idx;
- adm_params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- adm_params.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr);
- adm_params.payload_addr_msw = msm_audio_populate_upper_32_bits(
- cal_block->cal_data.paddr);
- adm_params.mem_map_handle = cal_block->map_data.q6map_handle;
- adm_params.payload_size = cal_block->cal_data.size;
+ mem_hdr.data_payload_addr_lsw =
+ lower_32_bits(cal_block->cal_data.paddr);
+ mem_hdr.data_payload_addr_msw =
+ msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+ mem_hdr.mem_map_handle = cal_block->map_data.q6map_handle;
+ payload_size = cal_block->cal_data.size;
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- pr_debug("%s: Sending SET_PARAMS payload = 0x%pK, size = %d\n",
- __func__, &cal_block->cal_data.paddr,
- adm_params.payload_size);
- result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
- if (result < 0) {
- pr_err("%s: Set params failed port 0x%x result %d\n",
- __func__, port_id, result);
- pr_debug("%s: Set params failed port = 0x%x payload = 0x%pK result %d\n",
- __func__, port_id, &cal_block->cal_data.paddr, result);
- result = -EINVAL;
- goto done;
- }
- /* Wait for the callback */
- result = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!result) {
- pr_err("%s: Set params timed out port = 0x%x\n",
- __func__, port_id);
- pr_debug("%s: Set params timed out port = 0x%x, payload = 0x%pK\n",
- __func__, port_id, &cal_block->cal_data.paddr);
- result = -EINVAL;
- goto done;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- result = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto done;
- }
+ adm_set_pp_params(port_id, copp_idx, &mem_hdr, NULL, payload_size);
done:
return result;
@@ -2089,8 +1986,7 @@ static int adm_remap_and_send_cal_block(int cal_index, int port_id,
__func__, cal_index);
goto done;
}
- ret = send_adm_cal_block(port_id, copp_idx, cal_block, perf_mode,
- app_type, acdb_id, sample_rate);
+ ret = send_adm_cal_block(port_id, copp_idx, cal_block, perf_mode);
if (ret < 0)
pr_debug("%s: No cal sent for cal_index %d, port_id = 0x%x! ret %d sample_rate %d\n",
__func__, cal_index, port_id, ret, sample_rate);
@@ -2600,10 +2496,10 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
{
- struct audproc_mfc_output_media_fmt mfc_cfg;
+ struct audproc_mfc_param_media_fmt mfc_cfg = {0};
struct adm_cmd_device_open_v5 open;
+ struct param_hdr_v3 param_hdr = {0};
int port_idx;
- int sz = 0;
int rc = 0;
int i = 0;
@@ -2620,32 +2516,13 @@ void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
goto fail_cmd;
}
- sz = sizeof(struct audproc_mfc_output_media_fmt);
+ memset(&open, 0, sizeof(open));
+
+ param_hdr.module_id = AUDPROC_MODULE_ID_MFC;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+ param_hdr.param_size = sizeof(mfc_cfg);
- mfc_cfg.params.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- mfc_cfg.params.hdr.pkt_size = sz;
- mfc_cfg.params.hdr.src_svc = APR_SVC_ADM;
- mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS;
- mfc_cfg.params.hdr.src_port = port_id;
- mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM;
- mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP;
- mfc_cfg.params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx;
- mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- mfc_cfg.params.payload_addr_lsw = 0;
- mfc_cfg.params.payload_addr_msw = 0;
- mfc_cfg.params.mem_map_handle = 0;
- mfc_cfg.params.payload_size = sizeof(mfc_cfg) -
- sizeof(mfc_cfg.params);
- mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
- mfc_cfg.data.param_id =
- AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
- mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
- sizeof(mfc_cfg.data);
- mfc_cfg.data.reserved = 0;
mfc_cfg.sampling_rate = dst_sample_rate;
mfc_cfg.bits_per_sample =
atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
@@ -2671,31 +2548,12 @@ void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
mfc_cfg.bits_per_sample, mfc_cfg.num_channels,
mfc_cfg.sampling_rate);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg);
+ rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &mfc_cfg);
+ if (rc)
+ pr_err("%s: Failed to set media format configuration data, err %d\n",
+ __func__, rc);
- if (rc < 0) {
- pr_err("%s: port_id: for[0x%x] failed %d\n",
- __func__, port_id, rc);
- goto fail_cmd;
- }
- /* Wait for the callback with copp id */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: mfc_cfg Set params timed out for port_id: for [0x%x]\n",
- __func__, port_id);
- goto fail_cmd;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- goto fail_cmd;
- }
- rc = 0;
fail_cmd:
return;
}
@@ -3545,134 +3403,43 @@ err:
int adm_set_volume(int port_id, int copp_idx, int volume)
{
- struct audproc_volume_ctrl_master_gain audproc_vol;
- int sz = 0;
+ struct audproc_volume_ctrl_master_gain audproc_vol = {0};
+ struct param_hdr_v3 param_hdr = {0};
int rc = 0;
- int port_idx;
pr_debug("%s: port_id %d, volume %d\n", __func__, port_id, volume);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
- if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
- pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
- return -EINVAL;
- }
+ param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+ param_hdr.param_size = sizeof(audproc_vol);
- sz = sizeof(struct audproc_volume_ctrl_master_gain);
- audproc_vol.params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- audproc_vol.params.hdr.pkt_size = sz;
- audproc_vol.params.hdr.src_svc = APR_SVC_ADM;
- audproc_vol.params.hdr.src_domain = APR_DOMAIN_APPS;
- audproc_vol.params.hdr.src_port = port_id;
- audproc_vol.params.hdr.dest_svc = APR_SVC_ADM;
- audproc_vol.params.hdr.dest_domain = APR_DOMAIN_ADSP;
- audproc_vol.params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- audproc_vol.params.hdr.token = port_idx << 16 | copp_idx;
- audproc_vol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- audproc_vol.params.payload_addr_lsw = 0;
- audproc_vol.params.payload_addr_msw = 0;
- audproc_vol.params.mem_map_handle = 0;
- audproc_vol.params.payload_size = sizeof(audproc_vol) -
- sizeof(audproc_vol.params);
- audproc_vol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
- audproc_vol.data.param_id = AUDPROC_PARAM_ID_VOL_CTRL_MASTER_GAIN;
- audproc_vol.data.param_size = audproc_vol.params.payload_size -
- sizeof(audproc_vol.data);
- audproc_vol.data.reserved = 0;
audproc_vol.master_gain = volume;
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_vol);
- if (rc < 0) {
- pr_err("%s: Set params failed port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: Vol cntrl Set params timed out port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
+ rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &audproc_vol);
+ if (rc)
+ pr_err("%s: Failed to set volume, err %d\n", __func__, rc);
+
return rc;
}
int adm_set_softvolume(int port_id, int copp_idx,
struct audproc_softvolume_params *softvol_param)
{
- struct audproc_soft_step_volume_params audproc_softvol;
- int sz = 0;
+ struct audproc_soft_step_volume_params audproc_softvol = {0};
+ struct param_hdr_v3 param_hdr = {0};
int rc = 0;
- int port_idx;
pr_debug("%s: period %d step %d curve %d\n", __func__,
softvol_param->period, softvol_param->step,
softvol_param->rampingcurve);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
+ param_hdr.param_size = sizeof(audproc_softvol);
- if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
- pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
- return -EINVAL;
- }
-
- sz = sizeof(struct audproc_soft_step_volume_params);
-
- audproc_softvol.params.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- audproc_softvol.params.hdr.pkt_size = sz;
- audproc_softvol.params.hdr.src_svc = APR_SVC_ADM;
- audproc_softvol.params.hdr.src_domain = APR_DOMAIN_APPS;
- audproc_softvol.params.hdr.src_port = port_id;
- audproc_softvol.params.hdr.dest_svc = APR_SVC_ADM;
- audproc_softvol.params.hdr.dest_domain = APR_DOMAIN_ADSP;
- audproc_softvol.params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- audproc_softvol.params.hdr.token = port_idx << 16 | copp_idx;
- audproc_softvol.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- audproc_softvol.params.payload_addr_lsw = 0;
- audproc_softvol.params.payload_addr_msw = 0;
- audproc_softvol.params.mem_map_handle = 0;
- audproc_softvol.params.payload_size = sizeof(audproc_softvol) -
- sizeof(audproc_softvol.params);
- audproc_softvol.data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
- audproc_softvol.data.param_id =
- AUDPROC_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
- audproc_softvol.data.param_size = audproc_softvol.params.payload_size -
- sizeof(audproc_softvol.data);
- audproc_softvol.data.reserved = 0;
audproc_softvol.period = softvol_param->period;
audproc_softvol.step = softvol_param->step;
audproc_softvol.ramping_curve = softvol_param->rampingcurve;
@@ -3681,315 +3448,122 @@ int adm_set_softvolume(int port_id, int copp_idx,
audproc_softvol.period, audproc_softvol.step,
audproc_softvol.ramping_curve);
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)&audproc_softvol);
- if (rc < 0) {
- pr_err("%s: Set params failed port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: Soft volume Set params timed out port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
+ rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &audproc_softvol);
+ if (rc)
+ pr_err("%s: Failed to set soft volume, err %d\n", __func__, rc);
+
return rc;
}
int adm_set_mic_gain(int port_id, int copp_idx, int volume)
{
- struct adm_set_mic_gain_params mic_gain_params;
+ struct admx_mic_gain mic_gain_params = {0};
+ struct param_hdr_v3 param_hdr = {0};
int rc = 0;
- int sz, port_idx;
- pr_debug("%s:\n", __func__);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
- return -EINVAL;
- }
+ pr_debug("%s: Setting mic gain to %d at port_id 0x%x\n", __func__,
+ volume, port_id);
- sz = sizeof(struct adm_set_mic_gain_params);
+ param_hdr.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = ADM_PARAM_IDX_MIC_GAIN;
+ param_hdr.param_size = sizeof(mic_gain_params);
- mic_gain_params.params.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- mic_gain_params.params.hdr.pkt_size = sz;
- mic_gain_params.params.hdr.src_svc = APR_SVC_ADM;
- mic_gain_params.params.hdr.src_domain = APR_DOMAIN_APPS;
- mic_gain_params.params.hdr.src_port = port_id;
- mic_gain_params.params.hdr.dest_svc = APR_SVC_ADM;
- mic_gain_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
- mic_gain_params.params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- mic_gain_params.params.hdr.token = port_idx << 16 | copp_idx;
- mic_gain_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- mic_gain_params.params.payload_addr_lsw = 0;
- mic_gain_params.params.payload_addr_msw = 0;
- mic_gain_params.params.mem_map_handle = 0;
- mic_gain_params.params.payload_size =
- sizeof(struct adm_param_data_v5) +
- sizeof(struct admx_mic_gain);
- mic_gain_params.data.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL;
- mic_gain_params.data.param_id = ADM_PARAM_IDX_MIC_GAIN;
- mic_gain_params.data.param_size =
- sizeof(struct admx_mic_gain);
- mic_gain_params.data.reserved = 0;
- mic_gain_params.mic_gain_data.tx_mic_gain = volume;
- mic_gain_params.mic_gain_data.reserved = 0;
- pr_debug("%s: Mic Gain set to %d at port_id 0x%x\n",
- __func__, volume, port_id);
+ mic_gain_params.tx_mic_gain = volume;
+
+ rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &mic_gain_params);
+ if (rc)
+ pr_err("%s: Failed to set mic gain, err %d\n", __func__, rc);
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mic_gain_params);
- if (rc < 0) {
- pr_err("%s: Set params failed port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: Mic Gain Set params timed out port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
return rc;
}
int adm_send_set_multichannel_ec_primary_mic_ch(int port_id, int copp_idx,
int primary_mic_ch)
{
- struct adm_set_sec_primary_ch_params sec_primary_ch_params;
+ struct admx_sec_primary_mic_ch sec_primary_ch_params = {0};
+ struct param_hdr_v3 param_hdr = {0};
int rc = 0;
- int sz, port_idx;
pr_debug("%s port_id 0x%x, copp_idx 0x%x, primary_mic_ch %d\n",
__func__, port_id, copp_idx, primary_mic_ch);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
- return -EINVAL;
- }
- if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
- pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
- return -EINVAL;
- }
+ param_hdr.module_id = AUDPROC_MODULE_ID_VOICE_TX_SECNS;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH;
+ param_hdr.param_size = sizeof(sec_primary_ch_params);
- sz = sizeof(struct adm_set_sec_primary_ch_params);
+ sec_primary_ch_params.version = 0;
+ sec_primary_ch_params.sec_primary_mic_ch = primary_mic_ch;
- sec_primary_ch_params.params.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- sec_primary_ch_params.params.hdr.pkt_size = sz;
- sec_primary_ch_params.params.hdr.src_svc = APR_SVC_ADM;
- sec_primary_ch_params.params.hdr.src_domain = APR_DOMAIN_APPS;
- sec_primary_ch_params.params.hdr.src_port = port_id;
- sec_primary_ch_params.params.hdr.dest_svc = APR_SVC_ADM;
- sec_primary_ch_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
- sec_primary_ch_params.params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- sec_primary_ch_params.params.hdr.token = port_idx << 16 | copp_idx;
- sec_primary_ch_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- sec_primary_ch_params.params.payload_addr_lsw = 0;
- sec_primary_ch_params.params.payload_addr_msw = 0;
- sec_primary_ch_params.params.mem_map_handle = 0;
- sec_primary_ch_params.params.payload_size =
- sizeof(struct adm_param_data_v5) +
- sizeof(struct admx_sec_primary_mic_ch);
- sec_primary_ch_params.data.module_id =
- AUDPROC_MODULE_ID_VOICE_TX_SECNS;
- sec_primary_ch_params.data.param_id =
- AUDPROC_PARAM_IDX_SEC_PRIMARY_MIC_CH;
- sec_primary_ch_params.data.param_size =
- sizeof(struct admx_sec_primary_mic_ch);
- sec_primary_ch_params.data.reserved = 0;
- sec_primary_ch_params.sec_primary_mic_ch_data.version = 0;
- sec_primary_ch_params.sec_primary_mic_ch_data.reserved = 0;
- sec_primary_ch_params.sec_primary_mic_ch_data.sec_primary_mic_ch =
- primary_mic_ch;
- sec_primary_ch_params.sec_primary_mic_ch_data.reserved1 = 0;
+ rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &sec_primary_ch_params);
+ if (rc)
+ pr_err("%s: Failed to set primary mic chanel, err %d\n",
+ __func__, rc);
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)&sec_primary_ch_params);
- if (rc < 0) {
- pr_err("%s: Set params failed port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: Mic Set params timed out port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
return rc;
}
-
-int adm_param_enable(int port_id, int copp_idx, int module_id, int enable)
+int adm_param_enable(int port_id, int copp_idx, int module_id, int enable)
{
- struct audproc_enable_param_t adm_mod_enable;
- int sz = 0;
- int rc = 0;
- int port_idx;
+ struct module_instance_info mod_inst_info = {0};
- pr_debug("%s port_id %d, module_id 0x%x, enable %d\n",
- __func__, port_id, module_id, enable);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ mod_inst_info.module_id = module_id;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
- if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
- pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+ return adm_param_enable_v2(port_id, copp_idx, mod_inst_info, enable);
+}
+
+int adm_param_enable_v2(int port_id, int copp_idx,
+ struct module_instance_info mod_inst_info, int enable)
+{
+ uint32_t enable_param;
+ struct param_hdr_v3 param_hdr = {0};
+ int rc = 0;
+
+ if (enable < 0 || enable > 1) {
+ pr_err("%s: Invalid value for enable %d\n", __func__, enable);
return -EINVAL;
}
- sz = sizeof(struct audproc_enable_param_t);
+ pr_debug("%s port_id %d, module_id 0x%x, instance_id 0x%x, enable %d\n",
+ __func__, port_id, mod_inst_info.module_id,
+ mod_inst_info.instance_id, enable);
- adm_mod_enable.pp_params.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- adm_mod_enable.pp_params.hdr.pkt_size = sz;
- adm_mod_enable.pp_params.hdr.src_svc = APR_SVC_ADM;
- adm_mod_enable.pp_params.hdr.src_domain = APR_DOMAIN_APPS;
- adm_mod_enable.pp_params.hdr.src_port = port_id;
- adm_mod_enable.pp_params.hdr.dest_svc = APR_SVC_ADM;
- adm_mod_enable.pp_params.hdr.dest_domain = APR_DOMAIN_ADSP;
- adm_mod_enable.pp_params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_mod_enable.pp_params.hdr.token = port_idx << 16 | copp_idx;
- adm_mod_enable.pp_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- adm_mod_enable.pp_params.payload_addr_lsw = 0;
- adm_mod_enable.pp_params.payload_addr_msw = 0;
- adm_mod_enable.pp_params.mem_map_handle = 0;
- adm_mod_enable.pp_params.payload_size = sizeof(adm_mod_enable) -
- sizeof(adm_mod_enable.pp_params) +
- sizeof(adm_mod_enable.pp_params.params);
- adm_mod_enable.pp_params.params.module_id = module_id;
- adm_mod_enable.pp_params.params.param_id = AUDPROC_PARAM_ID_ENABLE;
- adm_mod_enable.pp_params.params.param_size =
- adm_mod_enable.pp_params.payload_size -
- sizeof(adm_mod_enable.pp_params.params);
- adm_mod_enable.pp_params.params.reserved = 0;
- adm_mod_enable.enable = enable;
+ param_hdr.module_id = mod_inst_info.module_id;
+ param_hdr.instance_id = mod_inst_info.instance_id;
+ param_hdr.param_id = AUDPROC_PARAM_ID_ENABLE;
+ param_hdr.param_size = sizeof(enable_param);
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+ enable_param = enable;
+
+ rc = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &enable_param);
+ if (rc)
+ pr_err("%s: Failed to set enable of module(%d) instance(%d) to %d, err %d\n",
+ __func__, mod_inst_info.module_id,
+ mod_inst_info.instance_id, enable, rc);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_mod_enable);
- if (rc < 0) {
- pr_err("%s: Set params failed port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- }
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: module %x enable %d timed out on port = %#x\n",
- __func__, module_id, enable, port_id);
- rc = -EINVAL;
- goto fail_cmd;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
return rc;
}
+/* Parameter data must be pre-packed at the specified location with its
+ * header before calling this function. Use
+ * q6common_pack_pp_params to pack parameter data and header
+ * correctly.
+ */
int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
int cal_type, char *params, int size)
{
- struct adm_cmd_set_pp_params_v5 *adm_params = NULL;
- int sz, rc = 0;
- int port_idx;
+ int rc = 0;
pr_debug("%s:port_id %d, path %d, perf_mode %d, cal_type %d, size %d\n",
__func__, port_id, path, perf_mode, cal_type, size);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
- rc = -EINVAL;
- goto end;
- }
-
- if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
- pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
- return -EINVAL;
- }
-
/* Maps audio_dev_ctrl path definition to ACDB definition */
if (get_cal_path(path) != RX_DEVICE) {
pr_err("%s: acdb_path %d\n", __func__, path);
@@ -3997,64 +3571,9 @@ int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
goto end;
}
- sz = sizeof(struct adm_cmd_set_pp_params_v5) + size;
- adm_params = kzalloc(sz, GFP_KERNEL);
- if (!adm_params) {
- pr_err("%s, adm params memory alloc failed", __func__);
- rc = -ENOMEM;
- goto end;
- }
-
- memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_set_pp_params_v5)),
- params, size);
-
- adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- adm_params->hdr.pkt_size = sz;
- adm_params->hdr.src_svc = APR_SVC_ADM;
- adm_params->hdr.src_domain = APR_DOMAIN_APPS;
- adm_params->hdr.src_port = port_id;
- adm_params->hdr.dest_svc = APR_SVC_ADM;
- adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
- adm_params->hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- adm_params->hdr.token = port_idx << 16 | copp_idx;
- adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- /* payload address and mmap handle initialized to zero by kzalloc */
- adm_params->payload_size = size;
-
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
- if (rc < 0) {
- pr_err("%s: Set params failed port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto end;
- }
- /* Wait for the callback */
- rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!rc) {
- pr_err("%s: Set params timed out port = %#x\n",
- __func__, port_id);
- rc = -EINVAL;
- goto end;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto end;
- }
- rc = 0;
+ rc = adm_set_pp_params(port_id, copp_idx, NULL, (u8 *) params, size);
end:
- kfree(adm_params);
return rc;
}
@@ -4236,155 +3755,52 @@ end:
int adm_send_compressed_device_mute(int port_id, int copp_idx, bool mute_on)
{
- struct adm_set_compressed_device_mute mute_params;
+ u32 mute_param = mute_on ? 1 : 0;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int port_idx;
pr_debug("%s port_id: 0x%x, copp_idx %d, mute_on: %d\n",
__func__, port_id, copp_idx, mute_on);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
- pr_err("%s: Invalid port_id %#x copp_idx %d\n",
- __func__, port_id, copp_idx);
- ret = -EINVAL;
- goto end;
- }
- mute_params.command.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- mute_params.command.hdr.pkt_size =
- sizeof(struct adm_set_compressed_device_mute);
- mute_params.command.hdr.src_svc = APR_SVC_ADM;
- mute_params.command.hdr.src_domain = APR_DOMAIN_APPS;
- mute_params.command.hdr.src_port = port_id;
- mute_params.command.hdr.dest_svc = APR_SVC_ADM;
- mute_params.command.hdr.dest_domain = APR_DOMAIN_ADSP;
- mute_params.command.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- mute_params.command.hdr.token = port_idx << 16 | copp_idx;
- mute_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- mute_params.command.payload_addr_lsw = 0;
- mute_params.command.payload_addr_msw = 0;
- mute_params.command.mem_map_handle = 0;
- mute_params.command.payload_size = sizeof(mute_params) -
- sizeof(mute_params.command);
- mute_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_MUTE;
- mute_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_MUTE;
- mute_params.params.param_size = mute_params.command.payload_size -
- sizeof(mute_params.params);
- mute_params.params.reserved = 0;
- mute_params.mute_on = mute_on;
+ param_hdr.module_id = AUDPROC_MODULE_ID_COMPRESSED_MUTE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_COMPRESSED_MUTE;
+ param_hdr.param_size = sizeof(mute_param);
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mute_params);
- if (ret < 0) {
- pr_err("%s: device mute for port %d copp %d failed, ret %d\n",
- __func__, port_id, copp_idx, ret);
- ret = -EINVAL;
- goto end;
- }
+ ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &mute_param);
+ if (ret)
+ pr_err("%s: Failed to set mute, err %d\n", __func__, ret);
- /* Wait for the callback */
- ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: send device mute for port %d copp %d failed\n",
- __func__, port_id, copp_idx);
- ret = -EINVAL;
- goto end;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto end;
- }
- ret = 0;
-end:
return ret;
}
int adm_send_compressed_device_latency(int port_id, int copp_idx, int latency)
{
- struct adm_set_compressed_device_latency latency_params;
- int port_idx;
+ u32 latency_param;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
pr_debug("%s port_id: 0x%x, copp_idx %d latency: %d\n", __func__,
port_id, copp_idx, latency);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
- pr_err("%s: Invalid port_id %#x copp_idx %d\n",
- __func__, port_id, copp_idx);
- ret = -EINVAL;
- goto end;
+
+ if (latency < 0) {
+ pr_err("%s: Invalid value for latency %d", __func__, latency);
+ return -EINVAL;
}
- latency_params.command.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- latency_params.command.hdr.pkt_size =
- sizeof(struct adm_set_compressed_device_latency);
- latency_params.command.hdr.src_svc = APR_SVC_ADM;
- latency_params.command.hdr.src_domain = APR_DOMAIN_APPS;
- latency_params.command.hdr.src_port = port_id;
- latency_params.command.hdr.dest_svc = APR_SVC_ADM;
- latency_params.command.hdr.dest_domain = APR_DOMAIN_ADSP;
- latency_params.command.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- latency_params.command.hdr.token = port_idx << 16 | copp_idx;
- latency_params.command.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- latency_params.command.payload_addr_lsw = 0;
- latency_params.command.payload_addr_msw = 0;
- latency_params.command.mem_map_handle = 0;
- latency_params.command.payload_size = sizeof(latency_params) -
- sizeof(latency_params.command);
- latency_params.params.module_id = AUDPROC_MODULE_ID_COMPRESSED_LATENCY;
- latency_params.params.param_id = AUDPROC_PARAM_ID_COMPRESSED_LATENCY;
- latency_params.params.param_size = latency_params.command.payload_size -
- sizeof(latency_params.params);
- latency_params.params.reserved = 0;
- latency_params.latency = latency;
+ param_hdr.module_id = AUDPROC_MODULE_ID_COMPRESSED_LATENCY;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_COMPRESSED_LATENCY;
+ param_hdr.param_size = sizeof(latency_param);
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- ret = apr_send_pkt(this_adm.apr, (uint32_t *)&latency_params);
- if (ret < 0) {
- pr_err("%s: send device latency err %d for port %d copp %d\n",
- __func__, port_id, copp_idx, ret);
- ret = -EINVAL;
- goto end;
- }
+ latency_param = latency;
+
+ ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &latency_param);
+ if (ret)
+ pr_err("%s: Failed to set latency, err %d\n", __func__, ret);
- /* Wait for the callback */
- ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: send device latency for port %d failed\n", __func__,
- port_id);
- ret = -EINVAL;
- goto end;
- } else if (atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto end;
- }
- ret = 0;
-end:
return ret;
}
@@ -4403,9 +3819,10 @@ end:
int adm_swap_speaker_channels(int port_id, int copp_idx,
int sample_rate, bool spk_swap)
{
- struct audproc_mfc_output_media_fmt mfc_cfg;
+ struct audproc_mfc_param_media_fmt mfc_cfg;
+ struct param_hdr_v3 param_hdr = {0};
uint16_t num_channels;
- int port_idx;
+ int port_idx = 0;
int ret = 0;
pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
@@ -4414,50 +3831,26 @@ int adm_swap_speaker_channels(int port_id, int copp_idx,
port_idx = adm_validate_and_get_port_index(port_id);
if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
- ret = -EINVAL;
- goto done;
- }
-
- if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
- pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
+ } else if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+ pr_err("%s: Invalid copp_idx 0x%x\n", __func__, copp_idx);
+ return -EINVAL;
}
- num_channels = atomic_read(
- &this_adm.copp.channels[port_idx][copp_idx]);
+ num_channels = atomic_read(&this_adm.copp.channels[port_idx][copp_idx]);
if (num_channels != 2) {
pr_debug("%s: Invalid number of channels: %d\n",
__func__, num_channels);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
memset(&mfc_cfg, 0, sizeof(mfc_cfg));
- mfc_cfg.params.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- mfc_cfg.params.hdr.pkt_size =
- sizeof(mfc_cfg);
- mfc_cfg.params.hdr.src_svc = APR_SVC_ADM;
- mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS;
- mfc_cfg.params.hdr.src_port = port_id;
- mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM;
- mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP;
- mfc_cfg.params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx;
- mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- mfc_cfg.params.payload_addr_lsw = 0;
- mfc_cfg.params.payload_addr_msw = 0;
- mfc_cfg.params.mem_map_handle = 0;
- mfc_cfg.params.payload_size = sizeof(mfc_cfg) -
- sizeof(mfc_cfg.params);
- mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
- mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
- mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
- sizeof(mfc_cfg.data);
- mfc_cfg.data.reserved = 0;
+
+ param_hdr.module_id = AUDPROC_MODULE_ID_MFC;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+ param_hdr.param_size = sizeof(mfc_cfg);
+
mfc_cfg.sampling_rate = sample_rate;
mfc_cfg.bits_per_sample =
atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
@@ -4476,153 +3869,56 @@ int adm_swap_speaker_channels(int port_id, int copp_idx,
(uint16_t) PCM_CHANNEL_FR;
}
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- pr_debug("%s: mfc config: port_idx %d copp_idx %d copp SR %d copp BW %d copp chan %d\n",
- __func__, port_idx, copp_idx, mfc_cfg.sampling_rate,
- mfc_cfg.bits_per_sample, mfc_cfg.num_channels);
-
- ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg);
+ ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (u8 *) &mfc_cfg);
if (ret < 0) {
- pr_err("%s: port_id: for[0x%x] failed %d\n",
- __func__, port_id, ret);
- goto done;
- }
- /* Wait for the callback with copp id */
- ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: mfc_cfg Set params timed out for port_id: for [0x%x]\n",
- __func__, port_id);
- ret = -ETIMEDOUT;
- goto done;
- }
-
- if (atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx])));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [port_idx][copp_idx]));
- goto done;
+ pr_err("%s: Failed to set swap speaker channels on port[0x%x] failed %d\n",
+ __func__, port_id, ret);
+ return ret;
}
pr_debug("%s: mfc_cfg Set params returned success", __func__);
- ret = 0;
-
-done:
- return ret;
+ return 0;
}
EXPORT_SYMBOL(adm_swap_speaker_channels);
int adm_set_sound_focus(int port_id, int copp_idx,
struct sound_focus_param soundFocusData)
{
- struct adm_set_fluence_soundfocus_param soundfocus_params;
- int sz = 0;
+ struct adm_param_fluence_soundfocus_t soundfocus_params;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int port_idx;
int i;
pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
__func__, port_id, copp_idx);
- port_id = afe_convert_virtual_to_portid(port_id);
- port_idx = adm_validate_and_get_port_index(port_id);
- if (port_idx < 0) {
- pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+ param_hdr.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS;
+ param_hdr.param_size = sizeof(soundfocus_params);
- ret = -EINVAL;
- goto done;
- }
-
- if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
- pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
-
- ret = -EINVAL;
- goto done;
- }
-
- sz = sizeof(struct adm_set_fluence_soundfocus_param);
- soundfocus_params.params.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- soundfocus_params.params.hdr.pkt_size = sz;
- soundfocus_params.params.hdr.src_svc = APR_SVC_ADM;
- soundfocus_params.params.hdr.src_domain = APR_DOMAIN_APPS;
- soundfocus_params.params.hdr.src_port = port_id;
- soundfocus_params.params.hdr.dest_svc = APR_SVC_ADM;
- soundfocus_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
- soundfocus_params.params.hdr.dest_port =
- atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
- soundfocus_params.params.hdr.token = port_idx << 16 |
- ADM_CLIENT_ID_SOURCE_TRACKING << 8 | copp_idx;
- soundfocus_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
- soundfocus_params.params.payload_addr_lsw = 0;
- soundfocus_params.params.payload_addr_msw = 0;
- soundfocus_params.params.mem_map_handle = 0;
- soundfocus_params.params.payload_size = sizeof(soundfocus_params) -
- sizeof(soundfocus_params.params);
- soundfocus_params.data.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
- soundfocus_params.data.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS;
- soundfocus_params.data.param_size =
- soundfocus_params.params.payload_size -
- sizeof(soundfocus_params.data);
- soundfocus_params.data.reserved = 0;
-
- memset(&(soundfocus_params.soundfocus_data), 0xFF,
- sizeof(struct adm_param_fluence_soundfocus_t));
+ memset(&(soundfocus_params), 0xFF, sizeof(soundfocus_params));
for (i = 0; i < MAX_SECTORS; i++) {
- soundfocus_params.soundfocus_data.start_angles[i] =
+ soundfocus_params.start_angles[i] =
soundFocusData.start_angle[i];
- soundfocus_params.soundfocus_data.enables[i] =
- soundFocusData.enable[i];
+ soundfocus_params.enables[i] = soundFocusData.enable[i];
pr_debug("%s: start_angle[%d] = %d\n",
__func__, i, soundFocusData.start_angle[i]);
pr_debug("%s: enable[%d] = %d\n",
__func__, i, soundFocusData.enable[i]);
}
- soundfocus_params.soundfocus_data.gain_step =
- soundFocusData.gain_step;
+ soundfocus_params.gain_step = soundFocusData.gain_step;
pr_debug("%s: gain_step = %d\n", __func__, soundFocusData.gain_step);
- soundfocus_params.soundfocus_data.reserved = 0;
+ soundfocus_params.reserved = 0;
- atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- ret = apr_send_pkt(this_adm.apr, (uint32_t *)&soundfocus_params);
- if (ret < 0) {
- pr_err("%s: Set params failed\n", __func__);
+ ret = adm_pack_and_set_one_pp_param(port_id, copp_idx, param_hdr,
+ (uint8_t *) &soundfocus_params);
+ if (ret)
+ pr_err("%s: Failed to set sound focus params, err %d\n",
+ __func__, ret);
- ret = -EINVAL;
- goto done;
- }
- /* Wait for the callback */
- ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: Set params timed out\n", __func__);
-
- ret = -EINVAL;
- goto done;
- }
-
- if (this_adm.sourceTrackingData.apr_cmd_status != 0) {
- pr_err("%s - set params returned error [%s]\n",
- __func__, adsp_err_get_err_str(
- this_adm.sourceTrackingData.apr_cmd_status));
-
- ret = adsp_err_get_lnx_err_code(
- this_adm.sourceTrackingData.apr_cmd_status);
- goto done;
- }
-
- ret = 0;
-
-done:
pr_debug("%s: Exit, ret=%d\n", __func__, ret);
return ret;
@@ -4633,30 +3929,28 @@ int adm_get_sound_focus(int port_id, int copp_idx,
{
int ret = 0, i;
char *params_value;
- uint32_t param_payload_len = sizeof(struct adm_param_data_v5) +
- sizeof(struct adm_param_fluence_soundfocus_t);
- struct adm_param_fluence_soundfocus_t *soundfocus_params;
+ uint32_t max_param_size = 0;
+ struct adm_param_fluence_soundfocus_t *soundfocus_params = NULL;
+ struct param_hdr_v3 param_hdr = {0};
pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
__func__, port_id, copp_idx);
- params_value = kzalloc(param_payload_len, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed\n", __func__);
+ max_param_size = sizeof(struct adm_param_fluence_soundfocus_t) +
+ sizeof(union param_hdrs);
+ params_value = kzalloc(max_param_size, GFP_KERNEL);
+ if (!params_value)
+ return -ENOMEM;
- ret = -ENOMEM;
- goto done;
- }
- ret = adm_get_params_v2(port_id, copp_idx,
- VOICEPROC_MODULE_ID_GENERIC_TX,
- VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS,
- param_payload_len,
- params_value,
- ADM_CLIENT_ID_SOURCE_TRACKING);
+ param_hdr.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOUNDFOCUS;
+ param_hdr.param_size = max_param_size;
+ ret = adm_get_pp_params(port_id, copp_idx,
+ ADM_CLIENT_ID_SOURCE_TRACKING, NULL, &param_hdr,
+ params_value);
if (ret) {
pr_err("%s: get parameters failed ret:%d\n", __func__, ret);
-
- kfree(params_value);
ret = -EINVAL;
goto done;
}
@@ -4665,8 +3959,6 @@ int adm_get_sound_focus(int port_id, int copp_idx,
pr_err("%s - get params returned error [%s]\n",
__func__, adsp_err_get_err_str(
this_adm.sourceTrackingData.apr_cmd_status));
-
- kfree(params_value);
ret = adsp_err_get_lnx_err_code(
this_adm.sourceTrackingData.apr_cmd_status);
goto done;
@@ -4686,11 +3978,10 @@ int adm_get_sound_focus(int port_id, int copp_idx,
soundFocusData->gain_step = soundfocus_params->gain_step;
pr_debug("%s: gain_step = %d\n", __func__, soundFocusData->gain_step);
- kfree(params_value);
-
done:
pr_debug("%s: Exit, ret = %d\n", __func__, ret);
+ kfree(params_value);
return ret;
}
@@ -4755,9 +4046,12 @@ done:
int adm_get_source_tracking(int port_id, int copp_idx,
struct source_tracking_param *sourceTrackingData)
{
- struct adm_cmd_get_pp_params_v5 admp;
- int p_idx, ret = 0, i;
- struct adm_param_fluence_sourcetracking_t *source_tracking_params;
+ struct adm_param_fluence_sourcetracking_t *source_tracking_params =
+ NULL;
+ struct mem_mapping_hdr mem_hdr = {0};
+ struct param_hdr_v3 param_hdr = {0};
+ int i = 0;
+ int ret = 0;
pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
__func__, port_id, copp_idx);
@@ -4771,68 +4065,34 @@ int adm_get_source_tracking(int port_id, int copp_idx,
}
}
- port_id = afe_convert_virtual_to_portid(port_id);
- p_idx = adm_validate_and_get_port_index(port_id);
- if (p_idx < 0) {
- pr_err("%s - invalid port index %i, port id %i, copp idx %i\n",
- __func__, p_idx, port_id, copp_idx);
-
- ret = -EINVAL;
- goto done;
- }
-
- admp.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- admp.hdr.pkt_size = sizeof(admp);
- admp.hdr.src_svc = APR_SVC_ADM;
- admp.hdr.src_domain = APR_DOMAIN_APPS;
- admp.hdr.src_port = port_id;
- admp.hdr.dest_svc = APR_SVC_ADM;
- admp.hdr.dest_domain = APR_DOMAIN_ADSP;
- admp.hdr.dest_port = atomic_read(&this_adm.copp.id[p_idx][copp_idx]);
- admp.hdr.token = p_idx << 16 | ADM_CLIENT_ID_SOURCE_TRACKING << 8 |
- copp_idx;
- admp.hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
- admp.data_payload_addr_lsw =
+ mem_hdr.data_payload_addr_lsw =
lower_32_bits(this_adm.sourceTrackingData.memmap.paddr);
- admp.data_payload_addr_msw =
- msm_audio_populate_upper_32_bits(
- this_adm.sourceTrackingData.memmap.paddr);
- admp.mem_map_handle = atomic_read(&this_adm.mem_map_handles[
- ADM_MEM_MAP_INDEX_SOURCE_TRACKING]);
- admp.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
- admp.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOURCETRACKING;
- admp.param_max_size = sizeof(struct adm_param_fluence_sourcetracking_t)
- + sizeof(struct adm_param_data_v5);
- admp.reserved = 0;
-
- atomic_set(&this_adm.copp.stat[p_idx][copp_idx], -1);
-
- ret = apr_send_pkt(this_adm.apr, (uint32_t *)&admp);
- if (ret < 0) {
- pr_err("%s - failed to get Source Tracking Params\n",
- __func__);
-
- ret = -EINVAL;
- goto done;
- }
- ret = wait_event_timeout(this_adm.copp.wait[p_idx][copp_idx],
- atomic_read(&this_adm.copp.stat[p_idx][copp_idx]) >= 0,
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s - get params timed out\n", __func__);
+ mem_hdr.data_payload_addr_msw = msm_audio_populate_upper_32_bits(
+ this_adm.sourceTrackingData.memmap.paddr);
+ mem_hdr.mem_map_handle = atomic_read(
+ &this_adm.mem_map_handles[ADM_MEM_MAP_INDEX_SOURCE_TRACKING]);
+
+ param_hdr.module_id = VOICEPROC_MODULE_ID_GENERIC_TX;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = VOICEPROC_PARAM_ID_FLUENCE_SOURCETRACKING;
+ /*
+ * This size should be the max size of the calibration data + header.
+ * Use the union size to ensure max size is used.
+ */
+ param_hdr.param_size =
+ sizeof(struct adm_param_fluence_sourcetracking_t) +
+ sizeof(union param_hdrs);
- ret = -EINVAL;
- goto done;
- } else if (atomic_read(&this_adm.copp.stat
- [p_idx][copp_idx]) > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_adm.copp.stat
- [p_idx][copp_idx])));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_adm.copp.stat
- [p_idx][copp_idx]));
+ /*
+ * Retrieving parameters out of band, so no need to provide a buffer for
+ * the returned parameter data as it will be at the memory location
+ * provided.
+ */
+ ret = adm_get_pp_params(port_id, copp_idx,
+ ADM_CLIENT_ID_SOURCE_TRACKING, &mem_hdr,
+ &param_hdr, NULL);
+ if (ret) {
+ pr_err("%s: Failed to get params, error %d\n", __func__, ret);
goto done;
}
@@ -4846,9 +4106,11 @@ int adm_get_source_tracking(int port_id, int copp_idx,
goto done;
}
- source_tracking_params = (struct adm_param_fluence_sourcetracking_t *)
- (this_adm.sourceTrackingData.memmap.kvaddr +
- sizeof(struct adm_param_data_v5));
+ /* How do we know what the param data was retrieved with for hdr size */
+ source_tracking_params =
+ (struct adm_param_fluence_sourcetracking_t
+ *) (this_adm.sourceTrackingData.memmap.kvaddr +
+ sizeof(struct param_hdr_v1));
for (i = 0; i < MAX_SECTORS; i++) {
sourceTrackingData->vad[i] = source_tracking_params->vad[i];
pr_debug("%s: vad[%d] = %d\n",
@@ -4882,49 +4144,24 @@ done:
static int __init adm_init(void)
{
int i = 0, j;
- this_adm.apr = NULL;
+
this_adm.ec_ref_rx = -1;
- this_adm.num_ec_ref_rx_chans = 0;
- this_adm.ec_ref_rx_bit_width = 0;
- this_adm.ec_ref_rx_sampling_rate = 0;
- atomic_set(&this_adm.matrix_map_stat, 0);
init_waitqueue_head(&this_adm.matrix_map_wait);
- atomic_set(&this_adm.adm_stat, 0);
init_waitqueue_head(&this_adm.adm_wait);
for (i = 0; i < AFE_MAX_PORTS; i++) {
for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
atomic_set(&this_adm.copp.id[i][j], RESET_COPP_ID);
- atomic_set(&this_adm.copp.cnt[i][j], 0);
- atomic_set(&this_adm.copp.topology[i][j], 0);
- atomic_set(&this_adm.copp.mode[i][j], 0);
- atomic_set(&this_adm.copp.stat[i][j], 0);
- atomic_set(&this_adm.copp.rate[i][j], 0);
- atomic_set(&this_adm.copp.channels[i][j], 0);
- atomic_set(&this_adm.copp.bit_width[i][j], 0);
- atomic_set(&this_adm.copp.app_type[i][j], 0);
- atomic_set(&this_adm.copp.acdb_id[i][j], 0);
init_waitqueue_head(&this_adm.copp.wait[i][j]);
- atomic_set(&this_adm.copp.adm_delay_stat[i][j], 0);
init_waitqueue_head(
&this_adm.copp.adm_delay_wait[i][j]);
- atomic_set(&this_adm.copp.topology[i][j], 0);
- this_adm.copp.adm_delay[i][j] = 0;
- this_adm.copp.adm_status[i][j] = 0;
}
}
if (adm_init_cal_data())
pr_err("%s: could not init cal data!\n", __func__);
- this_adm.sourceTrackingData.ion_client = NULL;
- this_adm.sourceTrackingData.ion_handle = NULL;
- this_adm.sourceTrackingData.memmap.size = 0;
- this_adm.sourceTrackingData.memmap.kvaddr = NULL;
- this_adm.sourceTrackingData.memmap.paddr = 0;
this_adm.sourceTrackingData.apr_cmd_status = -1;
- atomic_set(&this_adm.mem_map_handles[ADM_MEM_MAP_INDEX_SOURCE_TRACKING],
- 0);
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index f0a78dc8aee8..93553f53d68b 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -23,6 +23,7 @@
#include <sound/apr_audio-v2.h>
#include <sound/q6afe-v2.h>
#include <sound/q6audio-v2.h>
+#include <sound/q6common.h>
#include "msm-pcm-routing-v2.h"
#include <sound/audio_cal_utils.h>
#include <sound/adsp_err.h>
@@ -190,100 +191,125 @@ static void afe_callback_debug_print(struct apr_client_data *data)
__func__, data->opcode, data->payload_size);
}
-static void av_dev_drift_afe_cb_handler(uint32_t *payload,
+static void av_dev_drift_afe_cb_handler(uint32_t opcode, uint32_t *payload,
uint32_t payload_size)
{
u32 param_id;
- struct afe_av_dev_drift_get_param_resp *resp =
- (struct afe_av_dev_drift_get_param_resp *) payload;
-
- if (!(&(resp->pdata))) {
- pr_err("%s: Error: resp pdata is NULL\n", __func__);
+ size_t expected_size =
+ sizeof(u32) + sizeof(struct afe_param_id_dev_timing_stats);
+
+ /* Get param ID depending on command type */
+ param_id = (opcode == AFE_PORT_CMDRSP_GET_PARAM_V3) ? payload[3] :
+ payload[2];
+ if (param_id != AFE_PARAM_ID_DEV_TIMING_STATS) {
+ pr_err("%s: Unrecognized param ID %d\n", __func__, param_id);
return;
}
- param_id = resp->pdata.param_id;
- if (param_id == AFE_PARAM_ID_DEV_TIMING_STATS) {
- if (payload_size < sizeof(this_afe.av_dev_drift_resp)) {
- pr_err("%s: Error: received size %d, resp size %zu\n",
- __func__, payload_size,
- sizeof(this_afe.av_dev_drift_resp));
+ switch (opcode) {
+ case AFE_PORT_CMDRSP_GET_PARAM_V2:
+ expected_size += sizeof(struct param_hdr_v1);
+ if (payload_size < expected_size) {
+ pr_err("%s: Error: received size %d, expected size %zu\n",
+ __func__, payload_size, expected_size);
+ return;
+ }
+ /* Repack response to add IID */
+ this_afe.av_dev_drift_resp.status = payload[0];
+ this_afe.av_dev_drift_resp.pdata.module_id = payload[1];
+ this_afe.av_dev_drift_resp.pdata.instance_id = INSTANCE_ID_0;
+ this_afe.av_dev_drift_resp.pdata.param_id = payload[2];
+ this_afe.av_dev_drift_resp.pdata.param_size = payload[3];
+ memcpy(&this_afe.av_dev_drift_resp.timing_stats, &payload[4],
+ sizeof(struct afe_param_id_dev_timing_stats));
+ break;
+ case AFE_PORT_CMDRSP_GET_PARAM_V3:
+ expected_size += sizeof(struct param_hdr_v3);
+ if (payload_size < expected_size) {
+ pr_err("%s: Error: received size %d, expected size %zu\n",
+ __func__, payload_size, expected_size);
return;
}
memcpy(&this_afe.av_dev_drift_resp, payload,
sizeof(this_afe.av_dev_drift_resp));
- if (!this_afe.av_dev_drift_resp.status) {
- atomic_set(&this_afe.state, 0);
- } else {
- pr_debug("%s: av_dev_drift_resp status: %d", __func__,
- this_afe.av_dev_drift_resp.status);
- atomic_set(&this_afe.state, -1);
- }
+ break;
+ default:
+ pr_err("%s: Unrecognized command %d\n", __func__, opcode);
+ return;
+ }
+
+ if (!this_afe.av_dev_drift_resp.status) {
+ atomic_set(&this_afe.state, 0);
+ } else {
+ pr_debug("%s: av_dev_drift_resp status: %d", __func__,
+ this_afe.av_dev_drift_resp.status);
+ atomic_set(&this_afe.state, -1);
}
}
-static int32_t sp_make_afe_callback(uint32_t *payload, uint32_t payload_size)
+static int32_t sp_make_afe_callback(uint32_t opcode, uint32_t *payload,
+ uint32_t payload_size)
{
- u32 param_id;
- struct afe_spkr_prot_calib_get_resp *resp =
- (struct afe_spkr_prot_calib_get_resp *) payload;
-
- if (!(&(resp->pdata))) {
- pr_err("%s: Error: resp pdata is NULL\n", __func__);
+ struct param_hdr_v3 param_hdr = {0};
+ u32 *data_dest = NULL;
+ u32 *data_start = NULL;
+ size_t expected_size = sizeof(u32);
+
+ /* Set command specific details */
+ switch (opcode) {
+ case AFE_PORT_CMDRSP_GET_PARAM_V2:
+ expected_size += sizeof(struct param_hdr_v1);
+ param_hdr.module_id = payload[1];
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = payload[2];
+ param_hdr.param_size = payload[3];
+ data_start = &payload[4];
+ break;
+ case AFE_PORT_CMDRSP_GET_PARAM_V3:
+ expected_size += sizeof(struct param_hdr_v3);
+ memcpy(&param_hdr, &payload[1], sizeof(struct param_hdr_v3));
+ data_start = &payload[5];
+ break;
+ default:
+ pr_err("%s: Unrecognized command %d\n", __func__, opcode);
return -EINVAL;
}
- param_id = resp->pdata.param_id;
- if (param_id == AFE_PARAM_ID_CALIB_RES_CFG_V2) {
- if (payload_size < sizeof(this_afe.calib_data)) {
- pr_err("%s: Error: received size %d, calib_data size %zu\n",
- __func__, payload_size,
- sizeof(this_afe.calib_data));
- return -EINVAL;
- }
- memcpy(&this_afe.calib_data, payload,
- sizeof(this_afe.calib_data));
- if (!this_afe.calib_data.status) {
- atomic_set(&this_afe.state, 0);
- } else {
- pr_debug("%s: calib resp status: %d", __func__,
- this_afe.calib_data.status);
- atomic_set(&this_afe.state, -1);
- }
+ switch (param_hdr.param_id) {
+ case AFE_PARAM_ID_CALIB_RES_CFG_V2:
+ expected_size += sizeof(struct asm_calib_res_cfg);
+ data_dest = (u32 *) &this_afe.calib_data;
+ break;
+ case AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS:
+ expected_size += sizeof(struct afe_sp_th_vi_ftm_params);
+ data_dest = (u32 *) &this_afe.th_vi_resp;
+ break;
+ case AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS:
+ expected_size += sizeof(struct afe_sp_ex_vi_ftm_params);
+ data_dest = (u32 *) &this_afe.ex_vi_resp;
+ break;
+ default:
+ pr_err("%s: Unrecognized param ID %d\n", __func__,
+ param_hdr.param_id);
+ return -EINVAL;
}
- if (param_id == AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS) {
- if (payload_size < sizeof(this_afe.th_vi_resp)) {
- pr_err("%s: Error: received size %d, th_vi_resp size %zu\n",
- __func__, payload_size,
- sizeof(this_afe.th_vi_resp));
- return -EINVAL;
- }
- memcpy(&this_afe.th_vi_resp, payload,
- sizeof(this_afe.th_vi_resp));
- if (!this_afe.th_vi_resp.status) {
- atomic_set(&this_afe.state, 0);
- } else {
- pr_debug("%s: th vi resp status: %d", __func__,
- this_afe.th_vi_resp.status);
- atomic_set(&this_afe.state, -1);
- }
+
+ if (payload_size < expected_size) {
+ pr_err("%s: Error: received size %d, expected size %zu for param %d\n",
+ __func__, payload_size, expected_size,
+ param_hdr.param_id);
+ return -EINVAL;
}
- if (param_id == AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS) {
- if (payload_size < sizeof(this_afe.ex_vi_resp)) {
- pr_err("%s: Error: received size %d, ex_vi_resp size %zu\n",
- __func__, payload_size,
- sizeof(this_afe.ex_vi_resp));
- return -EINVAL;
- }
- memcpy(&this_afe.ex_vi_resp, payload,
- sizeof(this_afe.ex_vi_resp));
- if (!this_afe.ex_vi_resp.status) {
- atomic_set(&this_afe.state, 0);
- } else {
- pr_debug("%s: ex vi resp status: %d", __func__,
- this_afe.ex_vi_resp.status);
- atomic_set(&this_afe.state, -1);
- }
+
+ data_dest[0] = payload[0];
+ memcpy(&data_dest[1], &param_hdr, sizeof(struct param_hdr_v3));
+ memcpy(&data_dest[5], data_start, param_hdr.param_size);
+
+ if (!data_dest[0]) {
+ atomic_set(&this_afe.state, 0);
+ } else {
+ pr_debug("%s: status: %d", __func__, data_dest[0]);
+ atomic_set(&this_afe.state, -1);
}
return 0;
@@ -341,8 +367,10 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
return 0;
}
afe_callback_debug_print(data);
- if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2) {
+ if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2 ||
+ data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V3) {
uint32_t *payload = data->payload;
+ uint32_t param_id;
if (!payload || (data->token >= AFE_MAX_PORTS)) {
pr_err("%s: Error: size %d payload %pK token %d\n",
@@ -351,15 +379,18 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
return -EINVAL;
}
- if (payload[2] == AFE_PARAM_ID_DEV_TIMING_STATS) {
- av_dev_drift_afe_cb_handler(data->payload,
+ param_id = (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V3) ?
+ payload[3] :
+ payload[2];
+ if (param_id == AFE_PARAM_ID_DEV_TIMING_STATS) {
+ av_dev_drift_afe_cb_handler(data->opcode, data->payload,
data->payload_size);
} else {
if (rtac_make_afe_callback(data->payload,
data->payload_size))
return 0;
- if (sp_make_afe_callback(data->payload,
+ if (sp_make_afe_callback(data->opcode, data->payload,
data->payload_size))
return -EINVAL;
}
@@ -380,8 +411,9 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
}
switch (payload[0]) {
case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_SET_PARAM_V3:
if (rtac_make_afe_callback(payload,
- data->payload_size))
+ data->payload_size))
return 0;
case AFE_PORT_CMD_DEVICE_STOP:
case AFE_PORT_CMD_DEVICE_START:
@@ -392,6 +424,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
case AFE_SERVICE_CMD_UNREGISTER_RT_PORT_DRIVER:
case AFE_PORTS_CMD_DTMF_CTL:
case AFE_SVC_CMD_SET_PARAM:
+ case AFE_SVC_CMD_SET_PARAM_V2:
atomic_set(&this_afe.state, 0);
wake_up(&this_afe.wait[data->token]);
break;
@@ -409,6 +442,28 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv)
pr_debug("%s: AFE_CMD_ADD_TOPOLOGIES cmd 0x%x\n",
__func__, payload[1]);
break;
+ case AFE_PORT_CMD_GET_PARAM_V2:
+ case AFE_PORT_CMD_GET_PARAM_V3:
+ /*
+ * Should only come here if there is an APR
+ * error or malformed APR packet. Otherwise
+ * response will be returned as
+ * AFE_PORT_CMDRSP_GET_PARAM_V2/3
+ */
+ pr_debug("%s: AFE Get Param opcode 0x%x token 0x%x src %d dest %d\n",
+ __func__, data->opcode, data->token,
+ data->src_port, data->dest_port);
+ if (payload[1] != 0) {
+ pr_err("%s: ADM Get Param failed with error %d\n",
+ __func__, payload[1]);
+ if (rtac_make_afe_callback(
+ payload,
+ data->payload_size))
+ return 0;
+ }
+ atomic_set(&this_afe.state, payload[1]);
+ wake_up(&this_afe.wait[data->token]);
+ break;
default:
pr_err("%s: Unknown cmd 0x%x\n", __func__,
payload[0]);
@@ -749,11 +804,402 @@ static int afe_apr_send_pkt(void *data, wait_queue_head_t *wait)
return ret;
}
+/* This function shouldn't be called directly. Instead call q6afe_set_params. */
+static int q6afe_set_params_v2(u16 port_id, int index,
+ struct mem_mapping_hdr *mem_hdr,
+ u8 *packed_param_data, u32 packed_data_size)
+{
+ struct afe_port_cmd_set_param_v2 *set_param = NULL;
+ uint32_t size = sizeof(struct afe_port_cmd_set_param_v2);
+ int rc = 0;
+
+ if (packed_param_data != NULL)
+ size += packed_data_size;
+ set_param = kzalloc(size, GFP_KERNEL);
+ if (set_param == NULL)
+ return -ENOMEM;
+
+ set_param->apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ set_param->apr_hdr.pkt_size = size;
+ set_param->apr_hdr.src_port = 0;
+ set_param->apr_hdr.dest_port = 0;
+ set_param->apr_hdr.token = index;
+ set_param->apr_hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ set_param->port_id = port_id;
+ if (packed_data_size > U16_MAX) {
+ pr_err("%s: Invalid data size for set params V2 %d\n", __func__,
+ packed_data_size);
+ rc = -EINVAL;
+ goto done;
+ }
+ set_param->payload_size = packed_data_size;
+ if (mem_hdr != NULL) {
+ set_param->mem_hdr = *mem_hdr;
+ } else if (packed_param_data != NULL) {
+ memcpy(&set_param->param_data, packed_param_data,
+ packed_data_size);
+ } else {
+ pr_err("%s: Both memory header and param data are NULL\n",
+ __func__);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ rc = afe_apr_send_pkt(set_param, &this_afe.wait[index]);
+done:
+ kfree(set_param);
+ return rc;
+}
+
+/* This function shouldn't be called directly. Instead call q6afe_set_params. */
+static int q6afe_set_params_v3(u16 port_id, int index,
+ struct mem_mapping_hdr *mem_hdr,
+ u8 *packed_param_data, u32 packed_data_size)
+{
+ struct afe_port_cmd_set_param_v3 *set_param = NULL;
+ uint32_t size = sizeof(struct afe_port_cmd_set_param_v3);
+ int rc = 0;
+
+ if (packed_param_data != NULL)
+ size += packed_data_size;
+ set_param = kzalloc(size, GFP_KERNEL);
+ if (set_param == NULL)
+ return -ENOMEM;
+
+ set_param->apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ set_param->apr_hdr.pkt_size = size;
+ set_param->apr_hdr.src_port = 0;
+ set_param->apr_hdr.dest_port = 0;
+ set_param->apr_hdr.token = index;
+ set_param->apr_hdr.opcode = AFE_PORT_CMD_SET_PARAM_V3;
+ set_param->port_id = port_id;
+ set_param->payload_size = packed_data_size;
+ if (mem_hdr != NULL) {
+ set_param->mem_hdr = *mem_hdr;
+ } else if (packed_param_data != NULL) {
+ memcpy(&set_param->param_data, packed_param_data,
+ packed_data_size);
+ } else {
+ pr_err("%s: Both memory header and param data are NULL\n",
+ __func__);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ rc = afe_apr_send_pkt(set_param, &this_afe.wait[index]);
+done:
+ kfree(set_param);
+ return rc;
+}
+
+static int q6afe_set_params(u16 port_id, int index,
+ struct mem_mapping_hdr *mem_hdr,
+ u8 *packed_param_data, u32 packed_data_size)
+{
+ int ret = 0;
+
+ ret = afe_q6_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ ;
+ }
+
+ port_id = q6audio_get_port_id(port_id);
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+ pr_err("%s: Not a valid port id = 0x%x ret %d\n", __func__,
+ port_id, ret);
+ return -EINVAL;
+ }
+
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid\n", __func__, index);
+ return -EINVAL;
+ }
+
+ if (q6common_is_instance_id_supported())
+ return q6afe_set_params_v3(port_id, index, mem_hdr,
+ packed_param_data, packed_data_size);
+ else
+ return q6afe_set_params_v2(port_id, index, mem_hdr,
+ packed_param_data, packed_data_size);
+}
+
+static int q6afe_pack_and_set_param_in_band(u16 port_id, int index,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data)
+{
+ u8 *packed_param_data = NULL;
+ int packed_data_size = sizeof(union param_hdrs) + param_hdr.param_size;
+ int ret;
+
+ packed_param_data = kzalloc(packed_data_size, GFP_KERNEL);
+ if (packed_param_data == NULL)
+ return -ENOMEM;
+
+ ret = q6common_pack_pp_params(packed_param_data, &param_hdr, param_data,
+ &packed_data_size);
+ if (ret) {
+ pr_err("%s: Failed to pack param header and data, error %d\n",
+ __func__, ret);
+ goto fail_cmd;
+ }
+
+ ret = q6afe_set_params(port_id, index, NULL, packed_param_data,
+ packed_data_size);
+
+fail_cmd:
+ kfree(packed_param_data);
+ return ret;
+}
+
+/* This function shouldn't be called directly. Instead call q6afe_get_param. */
+static int q6afe_get_params_v2(u16 port_id, int index,
+ struct mem_mapping_hdr *mem_hdr,
+ struct param_hdr_v3 *param_hdr)
+{
+ struct afe_port_cmd_get_param_v2 afe_get_param;
+ u32 param_size = param_hdr->param_size;
+
+ memset(&afe_get_param, 0, sizeof(afe_get_param));
+ afe_get_param.apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ afe_get_param.apr_hdr.pkt_size = sizeof(afe_get_param) + param_size;
+ afe_get_param.apr_hdr.src_port = 0;
+ afe_get_param.apr_hdr.dest_port = 0;
+ afe_get_param.apr_hdr.token = index;
+ afe_get_param.apr_hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
+ afe_get_param.port_id = port_id;
+ afe_get_param.payload_size = sizeof(struct param_hdr_v1) + param_size;
+ if (mem_hdr != NULL)
+ afe_get_param.mem_hdr = *mem_hdr;
+ /* Set MID and PID in command */
+ afe_get_param.module_id = param_hdr->module_id;
+ afe_get_param.param_id = param_hdr->param_id;
+ /* Set param header in payload */
+ afe_get_param.param_hdr.module_id = param_hdr->module_id;
+ afe_get_param.param_hdr.param_id = param_hdr->param_id;
+ afe_get_param.param_hdr.param_size = param_size;
+
+ return afe_apr_send_pkt(&afe_get_param, &this_afe.wait[index]);
+}
+
+/* This function shouldn't be called directly. Instead call q6afe_get_param. */
+static int q6afe_get_params_v3(u16 port_id, int index,
+ struct mem_mapping_hdr *mem_hdr,
+ struct param_hdr_v3 *param_hdr)
+{
+ struct afe_port_cmd_get_param_v3 afe_get_param;
+
+ memset(&afe_get_param, 0, sizeof(afe_get_param));
+ afe_get_param.apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ afe_get_param.apr_hdr.pkt_size = sizeof(afe_get_param);
+ afe_get_param.apr_hdr.src_port = 0;
+ afe_get_param.apr_hdr.dest_port = 0;
+ afe_get_param.apr_hdr.token = index;
+ afe_get_param.apr_hdr.opcode = AFE_PORT_CMD_GET_PARAM_V3;
+ afe_get_param.port_id = port_id;
+ if (mem_hdr != NULL)
+ afe_get_param.mem_hdr = *mem_hdr;
+ /* Set param header in command, no payload in V3 */
+ afe_get_param.param_hdr = *param_hdr;
+
+ return afe_apr_send_pkt(&afe_get_param, &this_afe.wait[index]);
+}
+
+/*
+ * Calling functions copy param data directly from this_afe. Do not copy data
+ * back to caller here.
+ */
+static int q6afe_get_params(u16 port_id, struct mem_mapping_hdr *mem_hdr,
+ struct param_hdr_v3 *param_hdr)
+{
+ int index;
+ int ret;
+
+ ret = afe_q6_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ port_id = q6audio_get_port_id(port_id);
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+ pr_err("%s: Not a valid port id = 0x%x ret %d\n", __func__,
+ port_id, ret);
+ return -EINVAL;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid\n", __func__, index);
+ return -EINVAL;
+ }
+
+ if (q6common_is_instance_id_supported())
+ return q6afe_get_params_v3(port_id, index, NULL, param_hdr);
+ else
+ return q6afe_get_params_v2(port_id, index, NULL, param_hdr);
+}
+
+/*
+ * This function shouldn't be called directly. Instead call
+ * q6afe_svc_set_params.
+ */
+static int q6afe_svc_set_params_v1(int index, struct mem_mapping_hdr *mem_hdr,
+ u8 *packed_param_data, u32 packed_data_size)
+{
+ struct afe_svc_cmd_set_param_v1 *svc_set_param = NULL;
+ uint32_t size = sizeof(struct afe_svc_cmd_set_param_v1);
+ int rc = 0;
+
+ if (packed_param_data != NULL)
+ size += packed_data_size;
+ svc_set_param = kzalloc(size, GFP_KERNEL);
+ if (svc_set_param == NULL)
+ return -ENOMEM;
+
+ svc_set_param->apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ svc_set_param->apr_hdr.pkt_size = size;
+ svc_set_param->apr_hdr.src_port = 0;
+ svc_set_param->apr_hdr.dest_port = 0;
+ svc_set_param->apr_hdr.token = IDX_GLOBAL_CFG;
+ svc_set_param->apr_hdr.opcode = AFE_SVC_CMD_SET_PARAM;
+ svc_set_param->payload_size = packed_data_size;
+
+ if (mem_hdr != NULL) {
+ /* Out of band case. */
+ svc_set_param->mem_hdr = *mem_hdr;
+ } else if (packed_param_data != NULL) {
+ /* In band case. */
+ memcpy(&svc_set_param->param_data, packed_param_data,
+ packed_data_size);
+ } else {
+ pr_err("%s: Both memory header and param data are NULL\n",
+ __func__);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ rc = afe_apr_send_pkt(svc_set_param, &this_afe.wait[index]);
+done:
+ kfree(svc_set_param);
+ return rc;
+}
+
+/*
+ * This function shouldn't be called directly. Instead call
+ * q6afe_svc_set_params.
+ */
+static int q6afe_svc_set_params_v2(int index, struct mem_mapping_hdr *mem_hdr,
+ u8 *packed_param_data, u32 packed_data_size)
+{
+ struct afe_svc_cmd_set_param_v2 *svc_set_param = NULL;
+ uint16_t size = sizeof(struct afe_svc_cmd_set_param_v2);
+ int rc = 0;
+
+ if (packed_param_data != NULL)
+ size += packed_data_size;
+ svc_set_param = kzalloc(size, GFP_KERNEL);
+ if (svc_set_param == NULL)
+ return -ENOMEM;
+
+ svc_set_param->apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ svc_set_param->apr_hdr.pkt_size = size;
+ svc_set_param->apr_hdr.src_port = 0;
+ svc_set_param->apr_hdr.dest_port = 0;
+ svc_set_param->apr_hdr.token = IDX_GLOBAL_CFG;
+ svc_set_param->apr_hdr.opcode = AFE_SVC_CMD_SET_PARAM_V2;
+ svc_set_param->payload_size = packed_data_size;
+
+ if (mem_hdr != NULL) {
+ /* Out of band case. */
+ svc_set_param->mem_hdr = *mem_hdr;
+ } else if (packed_param_data != NULL) {
+ /* In band case. */
+ memcpy(&svc_set_param->param_data, packed_param_data,
+ packed_data_size);
+ } else {
+ pr_err("%s: Both memory header and param data are NULL\n",
+ __func__);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ rc = afe_apr_send_pkt(svc_set_param, &this_afe.wait[index]);
+done:
+ kfree(svc_set_param);
+ return rc;
+}
+
+static int q6afe_svc_set_params(int index, struct mem_mapping_hdr *mem_hdr,
+ u8 *packed_param_data, u32 packed_data_size)
+{
+ int ret;
+
+ ret = afe_q6_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ if (q6common_is_instance_id_supported())
+ return q6afe_svc_set_params_v2(index, mem_hdr,
+ packed_param_data,
+ packed_data_size);
+ else
+ return q6afe_svc_set_params_v1(index, mem_hdr,
+ packed_param_data,
+ packed_data_size);
+}
+
+static int q6afe_svc_pack_and_set_param_in_band(int index,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data)
+{
+ u8 *packed_param_data = NULL;
+ u32 packed_data_size =
+ sizeof(struct param_hdr_v3) + param_hdr.param_size;
+ int ret = 0;
+
+ packed_param_data = kzalloc(packed_data_size, GFP_KERNEL);
+ if (!packed_param_data)
+ return -ENOMEM;
+
+ ret = q6common_pack_pp_params(packed_param_data, &param_hdr, param_data,
+ &packed_data_size);
+ if (ret) {
+ pr_err("%s: Failed to pack parameter header and data, error %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ret = q6afe_svc_set_params(index, NULL, packed_param_data,
+ packed_data_size);
+
+done:
+ kfree(packed_param_data);
+ return ret;
+}
+
static int afe_send_cal_block(u16 port_id, struct cal_block_data *cal_block)
{
- int result = 0;
- int index = 0;
- struct afe_audioif_config_command_no_payload afe_cal;
+ struct mem_mapping_hdr mem_hdr = {0};
+ int payload_size = 0;
+ int result = 0;
if (!cal_block) {
pr_debug("%s: No AFE cal to send!\n", __func__);
@@ -766,34 +1212,19 @@ static int afe_send_cal_block(u16 port_id, struct cal_block_data *cal_block)
goto done;
}
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- result = -EINVAL;
- goto done;
- }
-
- afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- afe_cal.hdr.pkt_size = sizeof(afe_cal);
- afe_cal.hdr.src_port = 0;
- afe_cal.hdr.dest_port = 0;
- afe_cal.hdr.token = index;
- afe_cal.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- afe_cal.param.port_id = port_id;
- afe_cal.param.payload_size = cal_block->cal_data.size;
- afe_cal.param.payload_address_lsw =
+ payload_size = cal_block->cal_data.size;
+ mem_hdr.data_payload_addr_lsw =
lower_32_bits(cal_block->cal_data.paddr);
- afe_cal.param.payload_address_msw =
+ mem_hdr.data_payload_addr_msw =
msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
- afe_cal.param.mem_map_handle = cal_block->map_data.q6map_handle;
+ mem_hdr.mem_map_handle = cal_block->map_data.q6map_handle;
pr_debug("%s: AFE cal sent for device port = 0x%x, cal size = %zd, cal addr = 0x%pK\n",
__func__, port_id,
cal_block->cal_data.size, &cal_block->cal_data.paddr);
- result = afe_apr_send_pkt(&afe_cal, &this_afe.wait[index]);
+ result = q6afe_set_params(port_id, q6audio_get_port_index(port_id),
+ &mem_hdr, NULL, payload_size);
if (result)
pr_err("%s: AFE cal for port 0x%x failed %d\n",
__func__, port_id, result);
@@ -889,9 +1320,8 @@ unlock:
static int afe_spk_ramp_dn_cfg(int port)
{
+ struct param_hdr_v3 param_info = {0};
int ret = -EINVAL;
- int index = 0;
- struct afe_spkr_prot_config_command config;
if (afe_get_port_type(port) != MSM_AFE_PORT_TYPE_RX) {
pr_debug("%s: port doesn't match 0x%x\n", __func__, port);
@@ -903,84 +1333,39 @@ static int afe_spk_ramp_dn_cfg(int port)
__func__, port, ret, this_afe.vi_rx_port);
return 0;
}
- memset(&config, 0 , sizeof(config));
- ret = q6audio_validate_port(port);
- if (ret < 0) {
- pr_err("%s: Invalid port 0x%x ret %d", __func__, port, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
- index = q6audio_get_port_index(port);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- ret = -EINVAL;
- goto fail_cmd;
- }
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port);
- config.param.payload_size =
- sizeof(config) - sizeof(config.hdr) - sizeof(config.param)
- - sizeof(config.prot_config);
- config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX;
- config.pdata.param_id = AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG;
- config.pdata.param_size = 0;
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
- if (ret < 0) {
- pr_err("%s: port = 0x%x param = 0x%x failed %d\n",
- __func__, port, config.pdata.param_id, ret);
- goto fail_cmd;
- }
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
+ param_info.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = AFE_PARAM_ID_FBSP_PTONE_RAMP_CFG;
+ param_info.param_size = 0;
+
+ ret = q6afe_pack_and_set_param_in_band(port,
+ q6audio_get_port_index(port),
+ param_info, NULL);
+ if (ret) {
+ pr_err("%s: Failed to set speaker ramp duration param, err %d\n",
+ __func__, ret);
goto fail_cmd;
}
+
/* dsp needs atleast 15ms to ramp down pilot tone*/
usleep_range(15000, 15010);
ret = 0;
fail_cmd:
- pr_debug("%s: config.pdata.param_id 0x%x status %d\n",
- __func__, config.pdata.param_id, ret);
-return ret;
+ pr_debug("%s: config.pdata.param_id 0x%x status %d\n", __func__,
+ param_info.param_id, ret);
+ return ret;
}
static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id,
- union afe_spkr_prot_config *prot_config)
+ union afe_spkr_prot_config *prot_config)
{
+ struct param_hdr_v3 param_info = {0};
int ret = -EINVAL;
- int index = 0;
- struct afe_spkr_prot_config_command config;
- memset(&config, 0 , sizeof(config));
- if (!prot_config) {
- pr_err("%s: Invalid params\n", __func__);
- goto fail_cmd;
- }
ret = q6audio_validate_port(src_port);
if (ret < 0) {
- pr_err("%s: Invalid src port 0x%x ret %d",
- __func__, src_port, ret);
+ pr_err("%s: Invalid src port 0x%x ret %d", __func__, src_port,
+ ret);
ret = -EINVAL;
goto fail_cmd;
}
@@ -991,21 +1376,15 @@ static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id,
ret = -EINVAL;
goto fail_cmd;
}
- index = q6audio_get_port_index(src_port);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- ret = -EINVAL;
- goto fail_cmd;
- }
+
switch (param_id) {
case AFE_PARAM_ID_FBSP_MODE_RX_CFG:
- config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX;
+ param_info.module_id = AFE_MODULE_FB_SPKR_PROT_V2_RX;
break;
case AFE_PARAM_ID_FEEDBACK_PATH_CFG:
this_afe.vi_tx_port = src_port;
this_afe.vi_rx_port = dst_port;
- config.pdata.module_id = AFE_MODULE_FEEDBACK;
+ param_info.module_id = AFE_MODULE_FEEDBACK;
break;
/*
* AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2 is same as
@@ -1013,11 +1392,11 @@ static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id,
*/
case AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG_V2:
case AFE_PARAM_ID_SP_V2_TH_VI_FTM_CFG:
- config.pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
+ param_info.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
break;
case AFE_PARAM_ID_SP_V2_EX_VI_MODE_CFG:
case AFE_PARAM_ID_SP_V2_EX_VI_FTM_CFG:
- config.pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
+ param_info.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
break;
default:
pr_err("%s: default case 0x%x\n", __func__, param_id);
@@ -1025,48 +1404,20 @@ static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id,
break;
}
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(src_port);
- config.param.payload_size = sizeof(config) - sizeof(config.hdr)
- - sizeof(config.param);
- config.pdata.param_id = param_id;
- config.pdata.param_size = sizeof(config.prot_config);
- config.prot_config = *prot_config;
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
- if (ret < 0) {
- pr_err("%s: port = 0x%x param = 0x%x failed %d\n",
- __func__, src_port, param_id, ret);
- goto fail_cmd;
- }
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
- ret = 0;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = param_id;
+ param_info.param_size = sizeof(union afe_spkr_prot_config);
+
+ ret = q6afe_pack_and_set_param_in_band(src_port,
+ q6audio_get_port_index(src_port),
+ param_info, (u8 *) prot_config);
+ if (ret)
+ pr_err("%s: port = 0x%x param = 0x%x failed %d\n", __func__,
+ src_port, param_id, ret);
+
fail_cmd:
- pr_debug("%s: config.pdata.param_id 0x%x status %d 0x%x\n",
- __func__, config.pdata.param_id, ret, src_port);
+ pr_debug("%s: config.pdata.param_id 0x%x status %d 0x%x\n", __func__,
+ param_info.param_id, ret, src_port);
return ret;
}
@@ -1212,14 +1563,13 @@ done:
static int afe_send_hw_delay(u16 port_id, u32 rate)
{
- struct audio_cal_hw_delay_entry delay_entry;
- struct afe_audioif_config_command config;
- int index = 0;
+ struct audio_cal_hw_delay_entry delay_entry = {0};
+ struct afe_param_id_device_hw_delay_cfg hw_delay;
+ struct param_hdr_v3 param_info = {0};
int ret = -EINVAL;
pr_debug("%s:\n", __func__);
- memset(&delay_entry, 0, sizeof(delay_entry));
delay_entry.sample_rate = rate;
if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
ret = afe_get_cal_hw_delay(TX_DEVICE, &delay_entry);
@@ -1237,42 +1587,21 @@ static int afe_send_hw_delay(u16 port_id, u32 rate)
goto fail_cmd;
}
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- ret = -EINVAL;
- goto fail_cmd;
- }
+ param_info.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = AFE_PARAM_ID_DEVICE_HW_DELAY;
+ param_info.param_size = sizeof(hw_delay);
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- config.pdata.param_id = AFE_PARAM_ID_DEVICE_HW_DELAY;
- config.pdata.param_size = sizeof(config.port);
-
- config.port.hw_delay.delay_in_us = delay_entry.delay_usec;
- config.port.hw_delay.device_hw_delay_minor_version =
- AFE_API_VERSION_DEVICE_HW_DELAY;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
- if (ret) {
+ hw_delay.delay_in_us = delay_entry.delay_usec;
+ hw_delay.device_hw_delay_minor_version =
+ AFE_API_VERSION_DEVICE_HW_DELAY;
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_info, (u8 *) &hw_delay);
+ if (ret)
pr_err("%s: AFE hw delay for port 0x%x failed %d\n",
__func__, port_id, ret);
- goto fail_cmd;
- }
fail_cmd:
pr_debug("%s: port_id 0x%x rate %u delay_usec %d status %d\n",
@@ -1371,10 +1700,11 @@ unlock:
static int afe_send_port_topology_id(u16 port_id)
{
- struct afe_audioif_config_command config;
+ struct afe_param_id_set_topology_cfg topology = {0};
+ struct param_hdr_v3 param_info = {0};
+ u32 topology_id = 0;
int index = 0;
int ret = 0;
- u32 topology_id = 0;
index = q6audio_get_port_index(port_id);
if (index < 0 || index >= AFE_MAX_PORTS) {
@@ -1390,32 +1720,17 @@ static int afe_send_port_topology_id(u16 port_id)
goto done;
}
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- config.pdata.param_id = AFE_PARAM_ID_SET_TOPOLOGY;
- config.pdata.param_size = sizeof(config.port);
- config.port.topology.minor_version = AFE_API_VERSION_TOPOLOGY_V1;
- config.port.topology.topology_id = topology_id;
-
- pr_debug("%s: param PL size=%d iparam_size[%d][%zd %zd %zd %zd] param_id[0x%x]\n",
- __func__, config.param.payload_size, config.pdata.param_size,
- sizeof(config), sizeof(config.param), sizeof(config.port),
- sizeof(struct apr_hdr), config.pdata.param_id);
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ param_info.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = AFE_PARAM_ID_SET_TOPOLOGY;
+ param_info.param_size = sizeof(topology);
+
+ topology.minor_version = AFE_API_VERSION_TOPOLOGY_V1;
+ topology.topology_id = topology_id;
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_info, (u8 *) &topology);
if (ret) {
pr_err("%s: AFE set topology id enable for port 0x%x failed %d\n",
__func__, port_id, ret);
@@ -1568,33 +1883,24 @@ void afe_send_cal(u16 port_id)
int afe_turn_onoff_hw_mad(u16 mad_type, u16 enable)
{
+ struct afe_param_hw_mad_ctrl mad_enable_param = {0};
+ struct param_hdr_v3 param_info = {0};
int ret;
- struct afe_cmd_hw_mad_ctrl config;
pr_debug("%s: enter\n", __func__);
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = SLIMBUS_5_TX;
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_HW_MAD;
- config.pdata.param_id = AFE_PARAM_ID_HW_MAD_CTRL;
- config.pdata.param_size = sizeof(config.payload);
- config.payload.minor_version = 1;
- config.payload.mad_type = mad_type;
- config.payload.mad_enable = enable;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+
+ param_info.module_id = AFE_MODULE_HW_MAD;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = AFE_PARAM_ID_HW_MAD_CTRL;
+ param_info.param_size = sizeof(mad_enable_param);
+
+ mad_enable_param.minor_version = 1;
+ mad_enable_param.mad_type = mad_type;
+ mad_enable_param.mad_enable = enable;
+
+ ret = q6afe_pack_and_set_param_in_band(SLIMBUS_5_TX, IDX_GLOBAL_CFG,
+ param_info,
+ (u8 *) &mad_enable_param);
if (ret)
pr_err("%s: AFE_PARAM_ID_HW_MAD_CTRL failed %d\n", __func__,
ret);
@@ -1604,31 +1910,18 @@ int afe_turn_onoff_hw_mad(u16 mad_type, u16 enable)
static int afe_send_slimbus_slave_cfg(
struct afe_param_cdc_slimbus_slave_cfg *sb_slave_cfg)
{
+ struct param_hdr_v3 param_hdr = {0};
int ret;
- struct afe_svc_cmd_sb_slave_cfg config;
pr_debug("%s: enter\n", __func__);
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
- config.pdata.param_id = AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG;
- config.pdata.param_size =
- sizeof(struct afe_param_cdc_slimbus_slave_cfg);
- config.sb_slave_cfg = *sb_slave_cfg;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+ param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG;
+ param_hdr.param_size = sizeof(struct afe_param_cdc_slimbus_slave_cfg);
+
+ ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr,
+ (u8 *) sb_slave_cfg);
if (ret)
pr_err("%s: AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG failed %d\n",
__func__, ret);
@@ -1640,29 +1933,16 @@ static int afe_send_slimbus_slave_cfg(
static int afe_send_codec_reg_page_config(
struct afe_param_cdc_reg_page_cfg *cdc_reg_page_cfg)
{
- struct afe_svc_cmd_cdc_reg_page_cfg config;
+ struct param_hdr_v3 param_hdr = {0};
int ret;
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
- config.pdata.param_id = AFE_PARAM_ID_CDC_REG_PAGE_CFG;
- config.pdata.param_size =
- sizeof(struct afe_param_cdc_reg_page_cfg);
- config.cdc_reg_page_cfg = *cdc_reg_page_cfg;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+ param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CDC_REG_PAGE_CFG;
+ param_hdr.param_size = sizeof(struct afe_param_cdc_reg_page_cfg);
+
+ ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr,
+ (u8 *) cdc_reg_page_cfg);
if (ret)
pr_err("%s: AFE_PARAM_ID_CDC_REG_PAGE_CFG failed %d\n",
__func__, ret);
@@ -1673,186 +1953,116 @@ static int afe_send_codec_reg_page_config(
static int afe_send_codec_reg_config(
struct afe_param_cdc_reg_cfg_data *cdc_reg_cfg)
{
- int i, j, ret = -EINVAL;
- int pkt_size, payload_size, reg_per_pkt, num_pkts, num_regs;
- struct afe_svc_cmd_cdc_reg_cfg *config;
- struct afe_svc_cmd_set_param *param;
+ u8 *packed_param_data = NULL;
+ u32 packed_data_size = 0;
+ u32 single_param_size = 0;
+ u32 max_data_size = 0;
+ u32 max_single_param = 0;
+ struct param_hdr_v3 param_hdr = {0};
+ int idx = 0;
+ int ret = -EINVAL;
- reg_per_pkt = (APR_MAX_BUF - sizeof(*config)) /
- sizeof(struct afe_param_cdc_reg_cfg_payload);
- if (reg_per_pkt > 0) {
- num_pkts = (cdc_reg_cfg->num_registers / reg_per_pkt) +
- (cdc_reg_cfg->num_registers % reg_per_pkt == 0 ? 0 : 1);
- } else {
- pr_err("%s: Failed to build codec reg config APR packet\n",
- __func__);
- return -EINVAL;
- }
+ max_single_param = sizeof(struct param_hdr_v3) +
+ sizeof(struct afe_param_cdc_reg_cfg);
+ max_data_size = APR_MAX_BUF - sizeof(struct afe_svc_cmd_set_param_v2);
+ packed_param_data = kzalloc(max_data_size, GFP_KERNEL);
+ if (!packed_param_data)
+ return -ENOMEM;
- for (j = 0; j < num_pkts; ++j) {
- /*
- * num_regs is set to reg_per_pkt on each pass through the loop
- * except the last, when it is set to the number of registers
- * remaining from the total
- */
- num_regs = (j < (num_pkts - 1) ? reg_per_pkt :
- cdc_reg_cfg->num_registers - (reg_per_pkt * j));
- payload_size = sizeof(struct afe_param_cdc_reg_cfg_payload) *
- num_regs;
- pkt_size = sizeof(*config) + payload_size;
- pr_debug("%s: pkt_size %d, payload_size %d\n", __func__,
- pkt_size, payload_size);
- config = kzalloc(pkt_size, GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- config->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config->hdr.pkt_size = pkt_size;
- config->hdr.src_port = 0;
- config->hdr.dest_port = 0;
- config->hdr.token = IDX_GLOBAL_CFG;
- config->hdr.opcode = AFE_SVC_CMD_SET_PARAM;
-
- param = &config->param;
- param->payload_size = payload_size;
- param->payload_address_lsw = 0x00;
- param->payload_address_msw = 0x00;
- param->mem_map_handle = 0x00;
-
- for (i = 0; i < num_regs; i++) {
- config->reg_data[i].common.module_id =
- AFE_MODULE_CDC_DEV_CFG;
- config->reg_data[i].common.param_id =
- AFE_PARAM_ID_CDC_REG_CFG;
- config->reg_data[i].common.param_size =
- sizeof(config->reg_data[i].reg_cfg);
- config->reg_data[i].reg_cfg =
- cdc_reg_cfg->reg_data[i + (j * reg_per_pkt)];
+ /* param_hdr is the same for all params sent, set once at top */
+ param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CDC_REG_CFG;
+ param_hdr.param_size = sizeof(struct afe_param_cdc_reg_cfg);
+
+ while (idx < cdc_reg_cfg->num_registers) {
+ memset(packed_param_data, 0, max_data_size);
+ packed_data_size = 0;
+ single_param_size = 0;
+
+ while (packed_data_size + max_single_param < max_data_size &&
+ idx < cdc_reg_cfg->num_registers) {
+ ret = q6common_pack_pp_params(
+ packed_param_data + packed_data_size,
+ &param_hdr, (u8 *) &cdc_reg_cfg->reg_data[idx],
+ &single_param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack parameters with error %d\n",
+ __func__, ret);
+ goto done;
+ }
+ packed_data_size += single_param_size;
+ idx++;
}
- ret = afe_apr_send_pkt(config, &this_afe.wait[IDX_GLOBAL_CFG]);
+ ret = q6afe_svc_set_params(IDX_GLOBAL_CFG, NULL,
+ packed_param_data, packed_data_size);
if (ret) {
pr_err("%s: AFE_PARAM_ID_CDC_REG_CFG failed %d\n",
__func__, ret);
- kfree(config);
break;
}
- kfree(config);
}
-
+done:
+ kfree(packed_param_data);
return ret;
}
static int afe_init_cdc_reg_config(void)
{
+ struct param_hdr_v3 param_hdr = {0};
int ret;
- struct afe_svc_cmd_init_cdc_reg_cfg config;
pr_debug("%s: enter\n", __func__);
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
-
- config.param.payload_size = sizeof(struct afe_port_param_data_v2);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
-
- config.init.module_id = AFE_MODULE_CDC_DEV_CFG;
- config.init.param_id = AFE_PARAM_ID_CDC_REG_CFG_INIT;
- config.init.param_size = 0;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
- if (ret) {
+ param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CDC_REG_CFG_INIT;
+
+ ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr,
+ NULL);
+ if (ret)
pr_err("%s: AFE_PARAM_ID_CDC_INIT_REG_CFG failed %d\n",
__func__, ret);
- }
return ret;
}
static int afe_send_slimbus_slave_port_cfg(
- struct afe_param_slimbus_slave_port_cfg *port_config, u16 port_id)
+ struct afe_param_slimbus_slave_port_cfg *slim_slave_config, u16 port_id)
{
- int ret, index;
- struct afe_cmd_hw_mad_slimbus_slave_port_cfg config;
+ struct param_hdr_v3 param_hdr = {0};
+ int ret;
pr_debug("%s: enter, port_id = 0x%x\n", __func__, port_id);
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(port_id);
- if (ret < 0) {
- pr_err("%s: port id = 0x%x ret %d\n", __func__, port_id, ret);
- return -EINVAL;
- }
-
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = port_id;
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_HW_MAD;
- config.pdata.param_id = AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG;
- config.pdata.param_size = sizeof(*port_config);
- config.sb_port_cfg = *port_config;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
- if (ret) {
+ param_hdr.module_id = AFE_MODULE_HW_MAD;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.reserved = 0;
+ param_hdr.param_id = AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG;
+ param_hdr.param_size = sizeof(struct afe_param_slimbus_slave_port_cfg);
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr,
+ (u8 *) slim_slave_config);
+ if (ret)
pr_err("%s: AFE_PARAM_ID_SLIMBUS_SLAVE_PORT_CFG failed %d\n",
__func__, ret);
- }
+
pr_debug("%s: leave %d\n", __func__, ret);
return ret;
}
static int afe_aanc_port_cfg(void *apr, uint16_t tx_port, uint16_t rx_port)
{
- struct afe_port_cmd_set_aanc_param cfg;
+ struct afe_param_aanc_port_cfg aanc_port_cfg = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
pr_debug("%s: tx_port 0x%x, rx_port 0x%x\n",
__func__, tx_port, rx_port);
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return -EINVAL;
- }
-
- index = q6audio_get_port_index(tx_port);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(tx_port);
- if (ret < 0) {
- pr_err("%s: port id: 0x%x ret %d\n", __func__, tx_port, ret);
- return -EINVAL;
- }
- pr_debug("%s: AANC sample rate tx rate: %d rx rate %d\n",
- __func__, this_afe.aanc_info.aanc_tx_port_sample_rate,
- this_afe.aanc_info.aanc_rx_port_sample_rate);
+ pr_debug("%s: AANC sample rate tx rate: %d rx rate %d\n", __func__,
+ this_afe.aanc_info.aanc_tx_port_sample_rate,
+ this_afe.aanc_info.aanc_rx_port_sample_rate);
/*
* If aanc tx sample rate or rx sample rate is zero, skip aanc
* configuration as AFE resampler will fail for invalid sample
@@ -1863,176 +2073,103 @@ static int afe_aanc_port_cfg(void *apr, uint16_t tx_port, uint16_t rx_port)
return -EINVAL;
}
- cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- cfg.hdr.pkt_size = sizeof(cfg);
- cfg.hdr.src_port = 0;
- cfg.hdr.dest_port = 0;
- cfg.hdr.token = index;
- cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
-
- cfg.param.port_id = tx_port;
- cfg.param.payload_size = sizeof(struct afe_port_param_data_v2) +
- sizeof(struct afe_param_aanc_port_cfg);
- cfg.param.payload_address_lsw = 0;
- cfg.param.payload_address_msw = 0;
- cfg.param.mem_map_handle = 0;
+ param_hdr.module_id = AFE_MODULE_AANC;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_AANC_PORT_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_param_aanc_port_cfg);
- cfg.pdata.module_id = AFE_MODULE_AANC;
- cfg.pdata.param_id = AFE_PARAM_ID_AANC_PORT_CONFIG;
- cfg.pdata.param_size = sizeof(struct afe_param_aanc_port_cfg);
- cfg.pdata.reserved = 0;
-
- cfg.data.aanc_port_cfg.aanc_port_cfg_minor_version =
+ aanc_port_cfg.aanc_port_cfg_minor_version =
AFE_API_VERSION_AANC_PORT_CONFIG;
- cfg.data.aanc_port_cfg.tx_port_sample_rate =
+ aanc_port_cfg.tx_port_sample_rate =
this_afe.aanc_info.aanc_tx_port_sample_rate;
- cfg.data.aanc_port_cfg.tx_port_channel_map[0] = AANC_TX_VOICE_MIC;
- cfg.data.aanc_port_cfg.tx_port_channel_map[1] = AANC_TX_NOISE_MIC;
- cfg.data.aanc_port_cfg.tx_port_channel_map[2] = AANC_TX_ERROR_MIC;
- cfg.data.aanc_port_cfg.tx_port_channel_map[3] = AANC_TX_MIC_UNUSED;
- cfg.data.aanc_port_cfg.tx_port_channel_map[4] = AANC_TX_MIC_UNUSED;
- cfg.data.aanc_port_cfg.tx_port_channel_map[5] = AANC_TX_MIC_UNUSED;
- cfg.data.aanc_port_cfg.tx_port_channel_map[6] = AANC_TX_MIC_UNUSED;
- cfg.data.aanc_port_cfg.tx_port_channel_map[7] = AANC_TX_MIC_UNUSED;
- cfg.data.aanc_port_cfg.tx_port_num_channels = 3;
- cfg.data.aanc_port_cfg.rx_path_ref_port_id = rx_port;
- cfg.data.aanc_port_cfg.ref_port_sample_rate =
- this_afe.aanc_info.aanc_rx_port_sample_rate;
-
- ret = afe_apr_send_pkt((uint32_t *) &cfg, &this_afe.wait[index]);
- if (ret) {
+ aanc_port_cfg.tx_port_channel_map[0] = AANC_TX_VOICE_MIC;
+ aanc_port_cfg.tx_port_channel_map[1] = AANC_TX_NOISE_MIC;
+ aanc_port_cfg.tx_port_channel_map[2] = AANC_TX_ERROR_MIC;
+ aanc_port_cfg.tx_port_channel_map[3] = AANC_TX_MIC_UNUSED;
+ aanc_port_cfg.tx_port_channel_map[4] = AANC_TX_MIC_UNUSED;
+ aanc_port_cfg.tx_port_channel_map[5] = AANC_TX_MIC_UNUSED;
+ aanc_port_cfg.tx_port_channel_map[6] = AANC_TX_MIC_UNUSED;
+ aanc_port_cfg.tx_port_channel_map[7] = AANC_TX_MIC_UNUSED;
+ aanc_port_cfg.tx_port_num_channels = 3;
+ aanc_port_cfg.rx_path_ref_port_id = rx_port;
+ aanc_port_cfg.ref_port_sample_rate =
+ this_afe.aanc_info.aanc_rx_port_sample_rate;
+
+ ret = q6afe_pack_and_set_param_in_band(tx_port,
+ q6audio_get_port_index(tx_port),
+ param_hdr,
+ (u8 *) &aanc_port_cfg);
+ if (ret)
pr_err("%s: AFE AANC port config failed for tx_port 0x%x, rx_port 0x%x ret %d\n",
- __func__, tx_port, rx_port, ret);
- }
+ __func__, tx_port, rx_port, ret);
return ret;
}
static int afe_aanc_mod_enable(void *apr, uint16_t tx_port, uint16_t enable)
{
- struct afe_port_cmd_set_aanc_param cfg;
+ struct afe_mod_enable_param mod_enable = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
-
- pr_debug("%s: tx_port 0x%x\n",
- __func__, tx_port);
-
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return -EINVAL;
- }
-
- index = q6audio_get_port_index(tx_port);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(tx_port);
- if (ret < 0) {
- pr_err("%s: port id: 0x%x ret %d\n", __func__, tx_port, ret);
- return -EINVAL;
- }
-
- cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- cfg.hdr.pkt_size = sizeof(cfg);
- cfg.hdr.src_port = 0;
- cfg.hdr.dest_port = 0;
- cfg.hdr.token = index;
- cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- cfg.param.port_id = tx_port;
- cfg.param.payload_size = sizeof(struct afe_port_param_data_v2) +
- sizeof(struct afe_mod_enable_param);
- cfg.param.payload_address_lsw = 0;
- cfg.param.payload_address_lsw = 0;
- cfg.param.mem_map_handle = 0;
+ pr_debug("%s: tx_port 0x%x\n", __func__, tx_port);
- cfg.pdata.module_id = AFE_MODULE_AANC;
- cfg.pdata.param_id = AFE_PARAM_ID_ENABLE;
- cfg.pdata.param_size = sizeof(struct afe_mod_enable_param);
- cfg.pdata.reserved = 0;
+ param_hdr.module_id = AFE_MODULE_AANC;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_ENABLE;
+ param_hdr.param_size = sizeof(struct afe_mod_enable_param);
- cfg.data.mod_enable.enable = enable;
- cfg.data.mod_enable.reserved = 0;
+ mod_enable.enable = enable;
+ mod_enable.reserved = 0;
- ret = afe_apr_send_pkt((uint32_t *) &cfg, &this_afe.wait[index]);
- if (ret) {
+ ret = q6afe_pack_and_set_param_in_band(tx_port,
+ q6audio_get_port_index(tx_port),
+ param_hdr, (u8 *) &mod_enable);
+ if (ret)
pr_err("%s: AFE AANC enable failed for tx_port 0x%x ret %d\n",
__func__, tx_port, ret);
- }
return ret;
}
static int afe_send_bank_selection_clip(
struct afe_param_id_clip_bank_sel *param)
{
+ struct param_hdr_v3 param_hdr = {0};
int ret;
- struct afe_svc_cmd_set_clip_bank_selection config;
+
if (!param) {
pr_err("%s: Invalid params", __func__);
return -EINVAL;
}
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
-
- config.param.payload_size = sizeof(struct afe_port_param_data_v2) +
- sizeof(struct afe_param_id_clip_bank_sel);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
-
- config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
- config.pdata.param_id = AFE_PARAM_ID_CLIP_BANK_SEL_CFG;
- config.pdata.param_size =
- sizeof(struct afe_param_id_clip_bank_sel);
- config.bank_sel = *param;
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
- if (ret) {
+ param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CLIP_BANK_SEL_CFG;
+ param_hdr.param_size = sizeof(struct afe_param_id_clip_bank_sel);
+
+ ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr,
+ (u8 *) param);
+ if (ret)
pr_err("%s: AFE_PARAM_ID_CLIP_BANK_SEL_CFG failed %d\n",
__func__, ret);
- }
return ret;
}
int afe_send_aanc_version(
struct afe_param_id_cdc_aanc_version *version_cfg)
{
+ struct param_hdr_v3 param_hdr = {0};
int ret;
- struct afe_svc_cmd_cdc_aanc_version config;
pr_debug("%s: enter\n", __func__);
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
-
- config.param.payload_size = sizeof(struct afe_port_param_data_v2) +
- sizeof(struct afe_param_id_cdc_aanc_version);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
-
- config.pdata.module_id = AFE_MODULE_CDC_DEV_CFG;
- config.pdata.param_id = AFE_PARAM_ID_CDC_AANC_VERSION;
- config.pdata.param_size =
- sizeof(struct afe_param_id_cdc_aanc_version);
- config.version = *version_cfg;
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
- if (ret) {
+ param_hdr.module_id = AFE_MODULE_CDC_DEV_CFG;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CDC_AANC_VERSION;
+ param_hdr.param_size = sizeof(struct afe_param_id_cdc_aanc_version);
+
+ ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr,
+ (u8 *) version_cfg);
+ if (ret)
pr_err("%s: AFE_PARAM_ID_CDC_AANC_VERSION failed %d\n",
__func__, ret);
- }
return ret;
}
@@ -2139,166 +2276,54 @@ bool afe_has_config(enum afe_config_type config)
int afe_send_spdif_clk_cfg(struct afe_param_id_spdif_clk_cfg *cfg,
u16 port_id)
{
- struct afe_spdif_clk_config_command clk_cfg;
+ struct afe_param_id_spdif_clk_cfg clk_cfg = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
if (!cfg) {
pr_err("%s: Error, no configuration data\n", __func__);
- ret = -EINVAL;
- return ret;
- }
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(port_id);
- if (ret < 0) {
- pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
return -EINVAL;
}
- ret = afe_q6_interface_prepare();
- if (ret) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return ret;
- }
- clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
- clk_cfg.hdr.src_port = 0;
- clk_cfg.hdr.dest_port = 0;
- clk_cfg.hdr.token = index;
-
- clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- clk_cfg.param.port_id = q6audio_get_port_id(port_id);
- clk_cfg.param.payload_address_lsw = 0x00;
- clk_cfg.param.payload_address_msw = 0x00;
- clk_cfg.param.mem_map_handle = 0x00;
- clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- clk_cfg.pdata.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG;
- clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg);
- clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
- - sizeof(clk_cfg.param);
- clk_cfg.clk_cfg = *cfg;
-
- pr_debug("%s: Minor version = 0x%x clk val = %d\n"
- "clk root = 0x%x\n port id = 0x%x\n",
- __func__, cfg->clk_cfg_minor_version,
- cfg->clk_value, cfg->clk_root,
- q6audio_get_port_id(port_id));
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_param_id_spdif_clk_cfg);
+
+ clk_cfg = *cfg;
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
- if (ret < 0) {
+ pr_debug("%s: Minor version = 0x%x clk val = %d clk root = 0x%x port id = 0x%x\n",
+ __func__, clk_cfg.clk_cfg_minor_version, clk_cfg.clk_value,
+ clk_cfg.clk_root, q6audio_get_port_id(port_id));
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &clk_cfg);
+ if (ret < 0)
pr_err("%s: AFE send clock config for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n",
- __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
-
-fail_cmd:
return ret;
}
int afe_send_spdif_ch_status_cfg(struct afe_param_id_spdif_ch_status_cfg
*ch_status_cfg, u16 port_id)
{
- struct afe_spdif_chstatus_config_command ch_status;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
- if (!ch_status_cfg) {
+ if (!ch_status_cfg) {
pr_err("%s: Error, no configuration data\n", __func__);
- ret = -EINVAL;
- return ret;
- }
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(port_id);
- if (ret < 0) {
- pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
- return -EINVAL;
- }
+ return -EINVAL;
+ }
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return ret;
- }
- ch_status.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- ch_status.hdr.pkt_size = sizeof(ch_status_cfg);
- ch_status.hdr.src_port = 0;
- ch_status.hdr.dest_port = 0;
- ch_status.hdr.token = index;
-
- ch_status.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- ch_status.param.port_id = q6audio_get_port_id(port_id);
- ch_status.param.payload_address_lsw = 0x00;
- ch_status.param.payload_address_msw = 0x00;
- ch_status.param.mem_map_handle = 0x00;
- ch_status.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- ch_status.pdata.param_id = AFE_PARAM_ID_SPDIF_CLK_CONFIG;
- ch_status.pdata.param_size = sizeof(ch_status.ch_status);
- ch_status.param.payload_size = sizeof(ch_status)
- - sizeof(struct apr_hdr) - sizeof(ch_status.param);
- ch_status.ch_status = *ch_status_cfg;
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CH_STATUS_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_param_id_spdif_ch_status_cfg);
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &ch_status);
- if (ret < 0) {
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) ch_status_cfg);
+ if (ret < 0)
pr_err("%s: AFE send channel status for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n",
- __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
-
-fail_cmd:
return ret;
}
@@ -2366,10 +2391,9 @@ fail_cmd:
int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port,
u32 rate)
{
- struct afe_audioif_config_command config;
- int ret = 0;
- int index = 0;
+ struct param_hdr_v3 param_hdr = {0};
uint16_t port_index;
+ int ret = 0;
if (!spdif_port) {
pr_err("%s: Error, no configuration data\n", __func__);
@@ -2379,12 +2403,6 @@ int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port,
pr_debug("%s: port id: 0x%x\n", __func__, port_id);
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
ret = q6audio_validate_port(port_id);
if (ret < 0) {
pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
@@ -2394,24 +2412,14 @@ int afe_spdif_port_start(u16 port_id, struct afe_spdif_port_config *spdif_port,
afe_send_cal(port_id);
afe_send_hw_delay(port_id, rate);
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- config.pdata.param_id = AFE_PARAM_ID_SPDIF_CONFIG;
- config.pdata.param_size = sizeof(config.port);
- config.port.spdif = spdif_port->cfg;
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_SPDIF_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_spdif_port_config);
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) spdif_port);
if (ret) {
pr_err("%s: AFE enable for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
@@ -2443,9 +2451,8 @@ int afe_send_slot_mapping_cfg(
struct afe_param_id_slot_mapping_cfg *slot_mapping_cfg,
u16 port_id)
{
- struct afe_slot_mapping_config_command config;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
if (!slot_mapping_cfg) {
pr_err("%s: Error, no configuration data\n", __func__);
@@ -2454,67 +2461,18 @@ int afe_send_slot_mapping_cfg(
pr_debug("%s: port id: 0x%x\n", __func__, port_id);
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(port_id);
- if (ret < 0) {
- pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
- return -EINVAL;
- }
-
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config)
- - sizeof(struct apr_hdr) - sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_TDM;
- config.pdata.param_id = AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG;
- config.pdata.param_size = sizeof(config.slot_mapping);
- config.slot_mapping = *slot_mapping_cfg;
+ param_hdr.module_id = AFE_MODULE_TDM;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_param_id_slot_mapping_cfg);
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
- if (ret < 0) {
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr,
+ (u8 *) slot_mapping_cfg);
+ if (ret < 0)
pr_err("%s: AFE send slot mapping for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n",
- __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
-
-fail_cmd:
return ret;
}
@@ -2522,9 +2480,8 @@ int afe_send_custom_tdm_header_cfg(
struct afe_param_id_custom_tdm_header_cfg *custom_tdm_header_cfg,
u16 port_id)
{
- struct afe_custom_tdm_header_config_command config;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
if (!custom_tdm_header_cfg) {
pr_err("%s: Error, no configuration data\n", __func__);
@@ -2533,78 +2490,30 @@ int afe_send_custom_tdm_header_cfg(
pr_debug("%s: port id: 0x%x\n", __func__, port_id);
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(port_id);
- if (ret < 0) {
- pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
- return -EINVAL;
- }
+ param_hdr.module_id = AFE_MODULE_TDM;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CUSTOM_TDM_HEADER_CONFIG;
+ param_hdr.param_size =
+ sizeof(struct afe_param_id_custom_tdm_header_cfg);
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config)
- - sizeof(struct apr_hdr) - sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_TDM;
- config.pdata.param_id = AFE_PARAM_ID_CUSTOM_TDM_HEADER_CONFIG;
- config.pdata.param_size = sizeof(config.custom_tdm_header);
- config.custom_tdm_header = *custom_tdm_header_cfg;
-
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
- if (ret < 0) {
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr,
+ (u8 *) custom_tdm_header_cfg);
+ if (ret < 0)
pr_err("%s: AFE send custom tdm header for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n",
- __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
-
-fail_cmd:
return ret;
}
int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
u32 rate, u16 num_groups)
{
- struct afe_audioif_config_command config;
- int ret = 0;
+ struct param_hdr_v3 param_hdr = {0};
int index = 0;
uint16_t port_index = 0;
enum afe_mad_type mad_type = MAD_HW_NONE;
+ int ret = 0;
if (!tdm_port) {
pr_err("%s: Error, no configuration data\n", __func__);
@@ -2669,26 +2578,15 @@ int afe_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port,
}
}
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- config.pdata.param_id = AFE_PARAM_ID_TDM_CONFIG;
- config.pdata.param_size = sizeof(config.port);
- config.port.tdm = tdm_port->tdm;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_TDM_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_param_id_tdm_cfg);
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr,
+ (u8 *) &tdm_port->tdm);
if (ret) {
pr_err("%s: AFE enable for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
@@ -2743,61 +2641,45 @@ void afe_set_routing_callback(routing_cb cb)
int afe_port_send_usb_dev_param(u16 port_id, union afe_port_config *afe_config)
{
- struct afe_usb_audio_dev_param_command config;
- int ret = 0, index = 0;
+ struct afe_param_id_usb_audio_dev_params usb_dev = {0};
+ struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt = {0};
+ struct param_hdr_v3 param_hdr = {0};
+ int ret = 0;
if (!afe_config) {
pr_err("%s: Error, no configuration data\n", __func__);
ret = -EINVAL;
goto exit;
}
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid! for port ID 0x%x\n",
- __func__, index, port_id);
- ret = -EINVAL;
- goto exit;
- }
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS;
- config.pdata.param_size = sizeof(config.usb_dev);
- config.usb_dev.cfg_minor_version =
- AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
- config.usb_dev.dev_token = afe_config->usb_audio.dev_token;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+
+ param_hdr.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS;
+ param_hdr.param_size = sizeof(usb_dev);
+ usb_dev.cfg_minor_version = AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
+ usb_dev.dev_token = afe_config->usb_audio.dev_token;
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &usb_dev);
if (ret) {
pr_err("%s: AFE device param cmd failed %d\n",
__func__, ret);
- ret = -EINVAL;
goto exit;
}
- config.pdata.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT;
- config.pdata.param_size = sizeof(config.lpcm_fmt);
- config.lpcm_fmt.cfg_minor_version =
- AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
- config.lpcm_fmt.endian = afe_config->usb_audio.endian;
+ param_hdr.param_id = AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT;
+ param_hdr.param_size = sizeof(lpcm_fmt);
+ lpcm_fmt.cfg_minor_version = AFE_API_MINIOR_VERSION_USB_AUDIO_CONFIG;
+ lpcm_fmt.endian = afe_config->usb_audio.endian;
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &lpcm_fmt);
if (ret) {
pr_err("%s: AFE device param cmd LPCM_FMT failed %d\n",
__func__, ret);
- ret = -EINVAL;
goto exit;
}
@@ -2810,11 +2692,12 @@ static int q6afe_send_enc_config(u16 port_id,
union afe_port_config afe_config,
u16 afe_in_channels, u16 afe_in_bit_width)
{
- struct afe_audioif_config_command config;
- int index;
+ u32 enc_fmt;
+ struct afe_enc_cfg_blk_param_t enc_blk_param = {0};
+ struct avs_enc_packetizer_id_param_t enc_pkt_id_param = {0};
+ struct afe_port_media_type_t media_type = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret;
- int payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param) - sizeof(config.port);
pr_debug("%s:update DSP for enc format = %d\n", __func__, format);
if (format != ASM_MEDIA_FMT_SBC && format != ASM_MEDIA_FMT_AAC_V2 &&
@@ -2822,94 +2705,76 @@ static int q6afe_send_enc_config(u16 port_id,
pr_err("%s:Unsuppported format Ignore AFE config\n", __func__);
return 0;
}
- memset(&config, 0, sizeof(config));
- index = q6audio_get_port_index(port_id);
- if (index < 0) {
- pr_err("%s: Invalid index number: %d\n", __func__, index);
- return -EINVAL;
- }
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = payload_size + sizeof(config.port.enc_fmt);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_ID_ENCODER;
- config.pdata.param_id = AFE_ENCODER_PARAM_ID_ENC_FMT_ID;
- config.pdata.param_size = sizeof(config.port.enc_fmt);
- config.port.enc_fmt.fmt_id = format;
- pr_debug("%s:sending AFE_ENCODER_PARAM_ID_ENC_FMT_ID payload: %d\n",
- __func__, config.param.payload_size);
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ param_hdr.module_id = AFE_MODULE_ID_ENCODER;
+ param_hdr.instance_id = INSTANCE_ID_0;
+
+ param_hdr.param_id = AFE_ENCODER_PARAM_ID_ENC_FMT_ID;
+ param_hdr.param_size = sizeof(enc_fmt);
+ enc_fmt = format;
+ pr_debug("%s:sending AFE_ENCODER_PARAM_ID_ENC_FMT_ID payload\n",
+ __func__);
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &enc_fmt);
if (ret) {
pr_err("%s:unable to send AFE_ENCODER_PARAM_ID_ENC_FMT_ID",
__func__);
goto exit;
}
- config.param.payload_size = payload_size
- + sizeof(config.port.enc_blk_param);
- pr_debug("%s:send AFE_ENCODER_PARAM_ID_ENC_CFG_BLK to DSP payload:%d\n",
- __func__, config.param.payload_size);
- config.pdata.param_id = AFE_ENCODER_PARAM_ID_ENC_CFG_BLK;
- config.pdata.param_size = sizeof(config.port.enc_blk_param);
- config.port.enc_blk_param.enc_cfg_blk_size =
- sizeof(config.port.enc_blk_param.enc_blk_config);
- config.port.enc_blk_param.enc_blk_config = *cfg;
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ pr_debug("%s:send AFE_ENCODER_PARAM_ID_ENC_CFG_BLK to DSP payload\n",
+ __func__);
+ param_hdr.param_id = AFE_ENCODER_PARAM_ID_ENC_CFG_BLK;
+ param_hdr.param_size = sizeof(struct afe_enc_cfg_blk_param_t);
+ enc_blk_param.enc_cfg_blk_size = sizeof(union afe_enc_config_data);
+ enc_blk_param.enc_blk_config = *cfg;
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr,
+ (u8 *) &enc_blk_param);
if (ret) {
pr_err("%s: AFE_ENCODER_PARAM_ID_ENC_CFG_BLK for port 0x%x failed %d\n",
__func__, port_id, ret);
goto exit;
}
- config.param.payload_size =
- payload_size + sizeof(config.port.enc_pkt_id_param);
- pr_debug("%s:sending AFE_ENCODER_PARAM_ID_PACKETIZER to DSP payload = %d",
- __func__, config.param.payload_size);
- config.pdata.param_id = AFE_ENCODER_PARAM_ID_PACKETIZER_ID;
- config.pdata.param_size = sizeof(config.port.enc_pkt_id_param);
- config.port.enc_pkt_id_param.enc_packetizer_id =
- AFE_MODULE_ID_PACKETIZER_COP;
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ pr_debug("%s:sending AFE_ENCODER_PARAM_ID_PACKETIZER to DSP\n",
+ __func__);
+ param_hdr.param_id = AFE_ENCODER_PARAM_ID_PACKETIZER_ID;
+ param_hdr.param_size = sizeof(struct avs_enc_packetizer_id_param_t);
+ enc_pkt_id_param.enc_packetizer_id = AFE_MODULE_ID_PACKETIZER_COP;
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr,
+ (u8 *) &enc_pkt_id_param);
if (ret) {
pr_err("%s: AFE_ENCODER_PARAM_ID_PACKETIZER for port 0x%x failed %d\n",
__func__, port_id, ret);
goto exit;
}
- config.param.payload_size =
- payload_size + sizeof(config.port.media_type);
- config.pdata.param_size = sizeof(config.port.media_type);
-
pr_debug("%s:Sending AFE_API_VERSION_PORT_MEDIA_TYPE to DSP", __func__);
- config.pdata.module_id = AFE_MODULE_PORT;
- config.pdata.param_id = AFE_PARAM_ID_PORT_MEDIA_TYPE;
- config.port.media_type.minor_version = AFE_API_VERSION_PORT_MEDIA_TYPE;
- config.port.media_type.sample_rate = afe_config.slim_sch.sample_rate;
+ param_hdr.module_id = AFE_MODULE_PORT;
+ param_hdr.param_id = AFE_PARAM_ID_PORT_MEDIA_TYPE;
+ param_hdr.param_size = sizeof(struct afe_port_media_type_t);
+ media_type.minor_version = AFE_API_VERSION_PORT_MEDIA_TYPE;
+ media_type.sample_rate = afe_config.slim_sch.sample_rate;
if (afe_in_bit_width)
- config.port.media_type.bit_width = afe_in_bit_width;
+ media_type.bit_width = afe_in_bit_width;
else
- config.port.media_type.bit_width =
- afe_config.slim_sch.bit_width;
+ media_type.bit_width = afe_config.slim_sch.bit_width;
if (afe_in_channels)
- config.port.media_type.num_channels = afe_in_channels;
+ media_type.num_channels = afe_in_channels;
else
- config.port.media_type.num_channels =
- afe_config.slim_sch.num_channels;
- config.port.media_type.data_format = AFE_PORT_DATA_FORMAT_PCM;
- config.port.media_type.reserved = 0;
+ media_type.num_channels = afe_config.slim_sch.num_channels;
+ media_type.data_format = AFE_PORT_DATA_FORMAT_PCM;
+ media_type.reserved = 0;
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &media_type);
if (ret) {
pr_err("%s: AFE_API_VERSION_PORT_MEDIA_TYPE for port 0x%x failed %d\n",
__func__, port_id, ret);
@@ -2924,13 +2789,16 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
u32 rate, u16 afe_in_channels, u16 afe_in_bit_width,
union afe_enc_config_data *cfg, u32 enc_format)
{
- struct afe_audioif_config_command config;
+ union afe_port_config port_cfg;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
int cfg_type;
int index = 0;
enum afe_mad_type mad_type;
uint16_t port_index;
+ memset(&port_cfg, 0, sizeof(port_cfg));
+
if (!afe_config) {
pr_err("%s: Error, no configuration data\n", __func__);
ret = -EINVAL;
@@ -3051,13 +2919,6 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
}
}
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
-
switch (port_id) {
case AFE_PORT_ID_PRIMARY_PCM_RX:
case AFE_PORT_ID_PRIMARY_PCM_TX:
@@ -3153,24 +3014,21 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config,
ret = -EINVAL;
goto fail_cmd;
}
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- config.pdata.param_id = cfg_type;
- config.pdata.param_size = sizeof(config.port);
-
- config.port = *afe_config;
+
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = cfg_type;
+ param_hdr.param_size = sizeof(union afe_port_config);
+
+ port_cfg = *afe_config;
if ((enc_format != ASM_MEDIA_FMT_NONE) &&
(cfg_type == AFE_PARAM_ID_SLIMBUS_CONFIG)) {
- config.port.slim_sch.data_format =
- AFE_SB_DATA_FORMAT_GENERIC_COMPRESSED;
+ port_cfg.slim_sch.data_format =
+ AFE_SB_DATA_FORMAT_GENERIC_COMPRESSED;
}
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &port_cfg);
if (ret) {
pr_err("%s: AFE enable for port 0x%x failed %d\n",
__func__, port_id, ret);
@@ -3515,11 +3373,15 @@ int afe_open(u16 port_id,
union afe_port_config *afe_config, int rate)
{
struct afe_port_cmd_device_start start;
- struct afe_audioif_config_command config;
+ union afe_port_config port_cfg;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
int cfg_type;
int index = 0;
+ memset(&start, 0, sizeof(start));
+ memset(&port_cfg, 0, sizeof(port_cfg));
+
if (!afe_config) {
pr_err("%s: Error, no configuration data\n", __func__);
ret = -EINVAL;
@@ -3574,12 +3436,6 @@ int afe_open(u16 port_id,
}
mutex_lock(&this_afe.afe_cmd_lock);
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
switch (port_id) {
case PRIMARY_I2S_RX:
case PRIMARY_I2S_TX:
@@ -3641,24 +3497,16 @@ int afe_open(u16 port_id,
ret = -EINVAL;
goto fail_cmd;
}
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr)
- - sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- config.pdata.param_id = cfg_type;
- config.pdata.param_size = sizeof(config.port);
-
- config.port = *afe_config;
- pr_debug("%s: param PL size=%d iparam_size[%d][%zd %zd %zd %zd] param_id[0x%x]\n",
- __func__, config.param.payload_size, config.pdata.param_size,
- sizeof(config), sizeof(config.param), sizeof(config.port),
- sizeof(struct apr_hdr), config.pdata.param_id);
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[index]);
+
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = cfg_type;
+ param_hdr.param_size = sizeof(union afe_port_config);
+ port_cfg = *afe_config;
+
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &port_cfg);
if (ret) {
pr_err("%s: AFE enable for port 0x%x opcode[0x%x]failed %d\n",
__func__, port_id, cfg_type, ret);
@@ -3689,57 +3537,28 @@ fail_cmd:
int afe_loopback(u16 enable, u16 rx_port, u16 tx_port)
{
- struct afe_loopback_cfg_v1 lb_cmd;
+ struct afe_loopback_cfg_v1 lb_param = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
if (rx_port == MI2S_RX)
rx_port = AFE_PORT_ID_PRIMARY_MI2S_RX;
if (tx_port == MI2S_TX)
tx_port = AFE_PORT_ID_PRIMARY_MI2S_TX;
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return ret;
- }
+ param_hdr.module_id = AFE_MODULE_LOOPBACK;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_loopback_cfg_v1);
- index = q6audio_get_port_index(rx_port);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(rx_port);
- if (ret < 0) {
- pr_err("%s: Invalid port 0x%x ret %d", __func__, rx_port, ret);
- return -EINVAL;
- }
+ lb_param.dst_port_id = rx_port;
+ lb_param.routing_mode = LB_MODE_DEFAULT;
+ lb_param.enable = (enable ? 1 : 0);
+ lb_param.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG;
- lb_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(20), APR_PKT_VER);
- lb_cmd.hdr.pkt_size = sizeof(lb_cmd);
- lb_cmd.hdr.src_port = 0;
- lb_cmd.hdr.dest_port = 0;
- lb_cmd.hdr.token = index;
- lb_cmd.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- lb_cmd.param.port_id = tx_port;
- lb_cmd.param.payload_size = (sizeof(lb_cmd) - sizeof(struct apr_hdr) -
- sizeof(struct afe_port_cmd_set_param_v2));
- lb_cmd.param.payload_address_lsw = 0x00;
- lb_cmd.param.payload_address_msw = 0x00;
- lb_cmd.param.mem_map_handle = 0x00;
- lb_cmd.pdata.module_id = AFE_MODULE_LOOPBACK;
- lb_cmd.pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
- lb_cmd.pdata.param_size = lb_cmd.param.payload_size -
- sizeof(struct afe_port_param_data_v2);
-
- lb_cmd.dst_port_id = rx_port;
- lb_cmd.routing_mode = LB_MODE_DEFAULT;
- lb_cmd.enable = (enable ? 1 : 0);
- lb_cmd.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG;
-
- ret = afe_apr_send_pkt(&lb_cmd, &this_afe.wait[index]);
+ ret = q6afe_pack_and_set_param_in_band(tx_port,
+ q6audio_get_port_index(tx_port),
+ param_hdr, (u8 *) &lb_param);
if (ret)
pr_err("%s: AFE loopback failed %d\n", __func__, ret);
return ret;
@@ -3747,9 +3566,9 @@ int afe_loopback(u16 enable, u16 rx_port, u16 tx_port)
int afe_loopback_gain(u16 port_id, u16 volume)
{
- struct afe_loopback_gain_per_path_param set_param;
+ struct afe_loopback_gain_per_path_param set_param = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- int index = 0;
if (this_afe.apr == NULL) {
this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
@@ -3770,18 +3589,6 @@ int afe_loopback_gain(u16 port_id, u16 volume)
ret = -EINVAL;
goto fail_cmd;
}
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
- ret = q6audio_validate_port(port_id);
- if (ret < 0) {
- pr_err("%s: Invalid port 0x%x ret %d",
- __func__, port_id, ret);
- return -EINVAL;
- }
/* RX ports numbers are even .TX ports numbers are odd. */
if (port_id % 2 == 0) {
@@ -3793,36 +3600,19 @@ int afe_loopback_gain(u16 port_id, u16 volume)
pr_debug("%s: port 0x%x volume %d\n", __func__, port_id, volume);
- set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- set_param.hdr.pkt_size = sizeof(set_param);
- set_param.hdr.src_port = 0;
- set_param.hdr.dest_port = 0;
- set_param.hdr.token = index;
- set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
-
- set_param.param.port_id = port_id;
- set_param.param.payload_size =
- (sizeof(struct afe_loopback_gain_per_path_param) -
- sizeof(struct apr_hdr) - sizeof(struct afe_port_cmd_set_param_v2));
- set_param.param.payload_address_lsw = 0;
- set_param.param.payload_address_msw = 0;
- set_param.param.mem_map_handle = 0;
-
- set_param.pdata.module_id = AFE_MODULE_LOOPBACK;
- set_param.pdata.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
- set_param.pdata.param_size =
- (set_param.param.payload_size -
- sizeof(struct afe_port_param_data_v2));
+ param_hdr.module_id = AFE_MODULE_LOOPBACK;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
+ param_hdr.param_size = sizeof(struct afe_loopback_gain_per_path_param);
set_param.rx_port_id = port_id;
set_param.gain = volume;
- ret = afe_apr_send_pkt(&set_param, &this_afe.wait[index]);
- if (ret) {
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &set_param);
+ if (ret)
pr_err("%s: AFE param set failed for port 0x%x ret %d\n",
__func__, port_id, ret);
- goto fail_cmd;
- }
fail_cmd:
return ret;
@@ -3950,9 +3740,9 @@ int afe_pseudo_port_stop_nowait(u16 port_id)
int afe_port_group_set_param(u16 group_id,
union afe_port_group_config *afe_group_config)
{
- int ret;
- struct afe_port_group_create config;
+ struct param_hdr_v3 param_hdr = {0};
int cfg_type;
+ int ret;
if (!afe_group_config) {
pr_err("%s: Error, no configuration data\n", __func__);
@@ -3983,27 +3773,13 @@ int afe_port_group_set_param(u16 group_id,
return -EINVAL;
}
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
-
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_GROUP_DEVICE;
- config.pdata.param_id = cfg_type;
- config.pdata.param_size = sizeof(config.data);
- config.data = *afe_group_config;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+ param_hdr.module_id = AFE_MODULE_GROUP_DEVICE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = cfg_type;
+ param_hdr.param_size = sizeof(union afe_port_group_config);
+
+ ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr,
+ (u8 *) afe_group_config);
if (ret)
pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_CFG failed %d\n",
__func__, ret);
@@ -4015,8 +3791,9 @@ int afe_port_group_enable(u16 group_id,
union afe_port_group_config *afe_group_config,
u16 enable)
{
+ struct afe_group_device_enable group_enable = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret;
- struct afe_port_group_create config;
pr_debug("%s: group id: 0x%x enable: %d\n", __func__,
group_id, enable);
@@ -4035,28 +3812,15 @@ int afe_port_group_enable(u16 group_id,
}
}
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = IDX_GLOBAL_CFG;
- config.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
-
- config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AFE_MODULE_GROUP_DEVICE;
- config.pdata.param_id = AFE_PARAM_ID_GROUP_DEVICE_ENABLE;
- config.pdata.param_size = sizeof(config.data);
- config.data.group_enable.group_id = group_id;
- config.data.group_enable.enable = enable;
-
- ret = afe_apr_send_pkt(&config, &this_afe.wait[IDX_GLOBAL_CFG]);
+ param_hdr.module_id = AFE_MODULE_GROUP_DEVICE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_GROUP_DEVICE_ENABLE;
+ param_hdr.param_size = sizeof(struct afe_group_device_enable);
+ group_enable.group_id = group_id;
+ group_enable.enable = enable;
+
+ ret = q6afe_svc_pack_and_set_param_in_band(IDX_GLOBAL_CFG, param_hdr,
+ (u8 *) &group_enable);
if (ret)
pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_ENABLE failed %d\n",
__func__, ret);
@@ -5006,9 +4770,7 @@ fail_cmd:
static int afe_sidetone_iir(u16 tx_port_id)
{
- struct afe_loopback_iir_cfg_v2 iir_sidetone;
int ret;
- int index = 0;
uint16_t size = 0;
int cal_index = AFE_SIDETONE_IIR_CAL;
int iir_pregain = 0;
@@ -5016,20 +4778,13 @@ static int afe_sidetone_iir(u16 tx_port_id)
int iir_enable;
struct cal_block_data *cal_block;
int mid;
-
- memset(&iir_sidetone, 0, sizeof(iir_sidetone));
- index = q6audio_get_port_index(tx_port_id);
- iir_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- iir_sidetone.hdr.pkt_size = sizeof(iir_sidetone);
- iir_sidetone.hdr.src_port = 0;
- iir_sidetone.hdr.dest_port = 0;
- iir_sidetone.hdr.token = index;
- iir_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- iir_sidetone.param.port_id = tx_port_id;
- iir_sidetone.param.payload_address_lsw = 0x00;
- iir_sidetone.param.payload_address_msw = 0x00;
- iir_sidetone.param.mem_map_handle = 0x00;
+ struct afe_mod_enable_param enable = {0};
+ struct afe_sidetone_iir_filter_config_params filter_data = {0};
+ struct param_hdr_v3 param_hdr = {0};
+ u8 *packed_param_data = NULL;
+ u32 packed_param_size = 0;
+ u32 single_param_size = 0;
+ struct audio_cal_info_sidetone_iir *st_iir_cal_info = NULL;
if (this_afe.cal_data[cal_index] == NULL) {
pr_err("%s: cal data is NULL\n", __func__);
@@ -5045,14 +4800,13 @@ static int afe_sidetone_iir(u16 tx_port_id)
goto done;
}
- iir_pregain = ((struct audio_cal_info_sidetone_iir *)
- cal_block->cal_info)->pregain;
- iir_enable = ((struct audio_cal_info_sidetone_iir *)
- cal_block->cal_info)->iir_enable;
- iir_num_biquad_stages = ((struct audio_cal_info_sidetone_iir *)
- cal_block->cal_info)->num_biquad_stages;
- mid = ((struct audio_cal_info_sidetone_iir *)
- cal_block->cal_info)->mid;
+ /* Cache data from cal block while inside lock to reduce locked time */
+ st_iir_cal_info =
+ (struct audio_cal_info_sidetone_iir *) cal_block->cal_info;
+ iir_pregain = st_iir_cal_info->pregain;
+ iir_enable = st_iir_cal_info->iir_enable;
+ iir_num_biquad_stages = st_iir_cal_info->num_biquad_stages;
+ mid = st_iir_cal_info->mid;
/*
* calculate the actual size of payload based on no of stages
@@ -5068,75 +4822,85 @@ static int afe_sidetone_iir(u16 tx_port_id)
pr_debug("%s: adding 2 to size:%d\n", __func__, size);
size = size + 2;
}
- memcpy(&iir_sidetone.st_iir_filter_config_data.iir_config,
- &((struct audio_cal_info_sidetone_iir *)
- cal_block->cal_info)->iir_config,
- sizeof(iir_sidetone.st_iir_filter_config_data.iir_config));
+ memcpy(&filter_data.iir_config, &st_iir_cal_info->iir_config, size);
mutex_unlock(&this_afe.cal_data[cal_index]->lock);
- /*
- * Calculate the payload size for setparams command
- */
- iir_sidetone.param.payload_size = (sizeof(iir_sidetone) -
- sizeof(struct apr_hdr) -
- sizeof(struct afe_port_cmd_set_param_v2) -
- (MAX_SIDETONE_IIR_DATA_SIZE - size));
-
- pr_debug("%s: payload size :%d\n", __func__,
- iir_sidetone.param.payload_size);
+ packed_param_size =
+ sizeof(param_hdr) * 2 + sizeof(enable) + sizeof(filter_data);
+ packed_param_data = kzalloc(packed_param_size, GFP_KERNEL);
+ if (!packed_param_data)
+ return -ENOMEM;
+ packed_param_size = 0;
/*
* Set IIR enable params
*/
- iir_sidetone.st_iir_enable_pdata.module_id = mid;
- iir_sidetone.st_iir_enable_pdata.param_id =
- AFE_PARAM_ID_ENABLE;
- iir_sidetone.st_iir_enable_pdata.param_size =
- sizeof(iir_sidetone.st_iir_mode_enable_data);
- iir_sidetone.st_iir_mode_enable_data.enable = iir_enable;
+ param_hdr.module_id = mid;
+ param_hdr.param_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_ENABLE;
+ param_hdr.param_size = sizeof(enable);
+ enable.enable = iir_enable;
+ ret = q6common_pack_pp_params(packed_param_data, &param_hdr,
+ (u8 *) &enable, &single_param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack param data, error %d\n", __func__,
+ ret);
+ goto done;
+ }
+ packed_param_size += single_param_size;
/*
* Set IIR filter config params
*/
- iir_sidetone.st_iir_filter_config_pdata.module_id = mid;
- iir_sidetone.st_iir_filter_config_pdata.param_id =
- AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG;
- iir_sidetone.st_iir_filter_config_pdata.param_size =
- sizeof(iir_sidetone.st_iir_filter_config_data.num_biquad_stages)
- +
- sizeof(iir_sidetone.st_iir_filter_config_data.pregain) + size;
- iir_sidetone.st_iir_filter_config_pdata.reserved = 0;
- iir_sidetone.st_iir_filter_config_data.num_biquad_stages =
- iir_num_biquad_stages;
- iir_sidetone.st_iir_filter_config_data.pregain = iir_pregain;
+ param_hdr.module_id = mid;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_SIDETONE_IIR_FILTER_CONFIG;
+ param_hdr.param_size = sizeof(filter_data.num_biquad_stages) +
+ sizeof(filter_data.pregain) + size;
+ filter_data.num_biquad_stages = iir_num_biquad_stages;
+ filter_data.pregain = iir_pregain;
+ ret = q6common_pack_pp_params(packed_param_data + packed_param_size,
+ &param_hdr, (u8 *) &filter_data,
+ &single_param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack param data, error %d\n", __func__,
+ ret);
+ goto done;
+ }
+ packed_param_size += single_param_size;
+
pr_debug("%s: tx(0x%x)mid(0x%x)iir_en(%d)stg(%d)gain(0x%x)size(%d)\n",
- __func__, tx_port_id, mid,
- iir_sidetone.st_iir_mode_enable_data.enable,
- iir_sidetone.st_iir_filter_config_data.num_biquad_stages,
- iir_sidetone.st_iir_filter_config_data.pregain,
- iir_sidetone.st_iir_filter_config_pdata.param_size);
- ret = afe_apr_send_pkt(&iir_sidetone, &this_afe.wait[index]);
+ __func__, tx_port_id, mid, enable.enable,
+ filter_data.num_biquad_stages, filter_data.pregain,
+ param_hdr.param_size);
+
+ ret = q6afe_set_params(tx_port_id, q6audio_get_port_index(tx_port_id),
+ NULL, packed_param_data, packed_param_size);
if (ret)
pr_err("%s: AFE sidetone failed for tx_port(0x%x)\n",
__func__, tx_port_id);
done:
+ kfree(packed_param_data);
return ret;
-
}
static int afe_sidetone(u16 tx_port_id, u16 rx_port_id, bool enable)
{
- struct afe_st_loopback_cfg_v1 cmd_sidetone;
int ret;
- int index;
int cal_index = AFE_SIDETONE_CAL;
int sidetone_gain;
int sidetone_enable;
struct cal_block_data *cal_block;
int mid = 0;
+ struct afe_loopback_sidetone_gain gain_data = {0};
+ struct loopback_cfg_data cfg_data = {0};
+ struct param_hdr_v3 param_hdr = {0};
+ u8 *packed_param_data = NULL;
+ u32 packed_param_size = 0;
+ u32 single_param_size = 0;
+ struct audio_cal_info_sidetone *st_cal_info = NULL;
- memset(&cmd_sidetone, 0, sizeof(cmd_sidetone));
if (this_afe.cal_data[cal_index] == NULL) {
pr_err("%s: cal data is NULL\n", __func__);
ret = -EINVAL;
@@ -5150,60 +4914,61 @@ static int afe_sidetone(u16 tx_port_id, u16 rx_port_id, bool enable)
ret = -EINVAL;
goto done;
}
- sidetone_gain = ((struct audio_cal_info_sidetone *)
- cal_block->cal_info)->gain;
- sidetone_enable = ((struct audio_cal_info_sidetone *)
- cal_block->cal_info)->enable;
- mid = ((struct audio_cal_info_sidetone *)
- cal_block->cal_info)->mid;
- mutex_unlock(&this_afe.cal_data[cal_index]->lock);
- index = q6audio_get_port_index(tx_port_id);
- cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone);
- cmd_sidetone.hdr.src_port = 0;
- cmd_sidetone.hdr.dest_port = 0;
- cmd_sidetone.hdr.token = index;
- cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- cmd_sidetone.param.port_id = tx_port_id;
- cmd_sidetone.param.payload_size = (sizeof(cmd_sidetone) -
- sizeof(struct apr_hdr) -
- sizeof(struct afe_port_cmd_set_param_v2));
- cmd_sidetone.param.payload_address_lsw = 0x00;
- cmd_sidetone.param.payload_address_msw = 0x00;
- cmd_sidetone.param.mem_map_handle = 0x00;
- cmd_sidetone.gain_pdata.module_id = AFE_MODULE_LOOPBACK;
- cmd_sidetone.gain_pdata.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
- /*
- * size of actual payload only
- */
- cmd_sidetone.gain_pdata.param_size = sizeof(
- struct afe_loopback_sidetone_gain);
- cmd_sidetone.gain_data.rx_port_id = rx_port_id;
- cmd_sidetone.gain_data.gain = sidetone_gain;
+ /* Cache data from cal block while inside lock to reduce locked time */
+ st_cal_info = (struct audio_cal_info_sidetone *) cal_block->cal_info;
+ sidetone_gain = st_cal_info->gain;
+ sidetone_enable = st_cal_info->enable;
+ mid = st_cal_info->mid;
+ mutex_unlock(&this_afe.cal_data[cal_index]->lock);
- cmd_sidetone.cfg_pdata.module_id = AFE_MODULE_LOOPBACK;
- cmd_sidetone.cfg_pdata.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
- /*
- * size of actual payload only
- */
- cmd_sidetone.cfg_pdata.param_size = sizeof(struct loopback_cfg_data);
- cmd_sidetone.cfg_data.loopback_cfg_minor_version =
- AFE_API_VERSION_LOOPBACK_CONFIG;
- cmd_sidetone.cfg_data.dst_port_id = rx_port_id;
- cmd_sidetone.cfg_data.routing_mode = LB_MODE_SIDETONE;
- cmd_sidetone.cfg_data.enable = enable;
+ /* Set gain data. */
+ param_hdr.module_id = AFE_MODULE_LOOPBACK;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_GAIN_PER_PATH;
+ param_hdr.param_size = sizeof(struct afe_loopback_sidetone_gain);
+ gain_data.rx_port_id = rx_port_id;
+ gain_data.gain = sidetone_gain;
+ ret = q6common_pack_pp_params(packed_param_data, &param_hdr,
+ (u8 *) &gain_data, &single_param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack param data, error %d\n", __func__,
+ ret);
+ goto done;
+ }
+ packed_param_size += single_param_size;
+
+ /* Set configuration data. */
+ param_hdr.module_id = AFE_MODULE_LOOPBACK;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+ param_hdr.param_size = sizeof(struct loopback_cfg_data);
+ cfg_data.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG;
+ cfg_data.dst_port_id = rx_port_id;
+ cfg_data.routing_mode = LB_MODE_SIDETONE;
+ cfg_data.enable = enable;
+ ret = q6common_pack_pp_params(packed_param_data + packed_param_size,
+ &param_hdr, (u8 *) &cfg_data,
+ &single_param_size);
+ if (ret) {
+ pr_err("%s: Failed to pack param data, error %d\n", __func__,
+ ret);
+ goto done;
+ }
+ packed_param_size += single_param_size;
pr_debug("%s rx(0x%x) tx(0x%x) enable(%d) mid(0x%x) gain(%d) sidetone_enable(%d)\n",
__func__, rx_port_id, tx_port_id,
enable, mid, sidetone_gain, sidetone_enable);
- ret = afe_apr_send_pkt(&cmd_sidetone, &this_afe.wait[index]);
+ ret = q6afe_set_params(tx_port_id, q6audio_get_port_index(tx_port_id),
+ NULL, packed_param_data, packed_param_size);
if (ret)
pr_err("%s: AFE sidetone send failed for tx_port:%d rx_port:%d ret:%d\n",
__func__, tx_port_id, rx_port_id, ret);
+
done:
+ kfree(packed_param_data);
return ret;
}
@@ -5588,93 +5353,44 @@ fail_cmd:
int afe_set_digital_codec_core_clock(u16 port_id,
struct afe_digital_clk_cfg *cfg)
{
- struct afe_lpass_digital_clk_config_command clk_cfg;
- int index = 0;
+ struct afe_digital_clk_cfg clk_cfg = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
if (!cfg) {
pr_err("%s: clock cfg is NULL\n", __func__);
- ret = -EINVAL;
- return ret;
- }
-
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return ret;
+ return -EINVAL;
}
- clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
- clk_cfg.hdr.src_port = 0;
- clk_cfg.hdr.dest_port = 0;
- clk_cfg.hdr.token = index;
-
- clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
/*default rx port is taken to enable the codec digital clock*/
- clk_cfg.param.port_id = q6audio_get_port_id(port_id);
- clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
- - sizeof(clk_cfg.param);
- clk_cfg.param.payload_address_lsw = 0x00;
- clk_cfg.param.payload_address_msw = 0x00;
- clk_cfg.param.mem_map_handle = 0x00;
- clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- clk_cfg.pdata.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG;
- clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg);
- clk_cfg.clk_cfg = *cfg;
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG;
+ param_hdr.param_size = sizeof(struct afe_digital_clk_cfg);
+ clk_cfg = *cfg;
pr_debug("%s: Minor version =0x%x clk val = %d\n"
"clk root = 0x%x resrv = 0x%x\n",
- __func__, cfg->i2s_cfg_minor_version,
- cfg->clk_val, cfg->clk_root, cfg->reserved);
-
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
- if (ret < 0) {
- pr_err("%s: AFE enable for port 0x%x ret %d\n",
- __func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
+ __func__, cfg->i2s_cfg_minor_version, cfg->clk_val,
+ cfg->clk_root, cfg->reserved);
-fail_cmd:
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &clk_cfg);
+ if (ret < 0)
+ pr_err("%s: AFE enable for port 0x%x ret %d\n", __func__,
+ port_id, ret);
return ret;
}
int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg)
{
- struct afe_lpass_clk_config_command clk_cfg;
- int index = 0;
+ struct afe_clk_cfg clk_cfg = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
if (!cfg) {
pr_err("%s: clock cfg is NULL\n", __func__);
- ret = -EINVAL;
- return ret;
- }
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
return -EINVAL;
}
ret = q6audio_is_digital_pcm_interface(port_id);
@@ -5684,31 +5400,12 @@ int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg)
return -EINVAL;
}
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return ret;
- }
-
mutex_lock(&this_afe.afe_cmd_lock);
- clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
- clk_cfg.hdr.src_port = 0;
- clk_cfg.hdr.dest_port = 0;
- clk_cfg.hdr.token = index;
-
- clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- clk_cfg.param.port_id = q6audio_get_port_id(port_id);
- clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
- - sizeof(clk_cfg.param);
- clk_cfg.param.payload_address_lsw = 0x00;
- clk_cfg.param.payload_address_msw = 0x00;
- clk_cfg.param.mem_map_handle = 0x00;
- clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- clk_cfg.pdata.param_id = AFE_PARAM_ID_LPAIF_CLK_CONFIG;
- clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg);
- clk_cfg.clk_cfg = *cfg;
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_LPAIF_CLK_CONFIG;
+ param_hdr.param_size = sizeof(clk_cfg);
+ clk_cfg = *cfg;
pr_debug("%s: Minor version =0x%x clk val1 = %d\n"
"clk val2 = %d, clk src = 0x%x\n"
@@ -5719,41 +5416,20 @@ int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg)
cfg->clk_root, cfg->clk_set_mode,
cfg->reserved, q6audio_get_port_id(port_id));
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
- if (ret < 0) {
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &clk_cfg);
+ if (ret < 0)
pr_err("%s: AFE enable for port 0x%x ret %d\n",
__func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
-fail_cmd:
mutex_unlock(&this_afe.afe_cmd_lock);
return ret;
}
int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg)
{
- struct afe_lpass_clk_config_command_v2 clk_cfg;
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
if (!cfg) {
@@ -5774,24 +5450,10 @@ int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg)
}
mutex_lock(&this_afe.afe_cmd_lock);
- clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
- clk_cfg.hdr.src_port = 0;
- clk_cfg.hdr.dest_port = 0;
- clk_cfg.hdr.token = index;
-
- clk_cfg.hdr.opcode = AFE_SVC_CMD_SET_PARAM;
- clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
- - sizeof(clk_cfg.param);
- clk_cfg.param.payload_address_lsw = 0x00;
- clk_cfg.param.payload_address_msw = 0x00;
- clk_cfg.param.mem_map_handle = 0x00;
- clk_cfg.pdata.module_id = AFE_MODULE_CLOCK_SET;
- clk_cfg.pdata.param_id = AFE_PARAM_ID_CLOCK_SET;
- clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg);
- clk_cfg.clk_cfg = *cfg;
-
+ param_hdr.module_id = AFE_MODULE_CLOCK_SET;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CLOCK_SET;
+ param_hdr.param_size = sizeof(struct afe_clk_set);
pr_debug("%s: Minor version =0x%x clk id = %d\n"
"clk freq (Hz) = %d, clk attri = 0x%x\n"
@@ -5800,34 +5462,12 @@ int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg)
cfg->clk_id, cfg->clk_freq_in_hz, cfg->clk_attri,
cfg->clk_root, cfg->enable);
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
- if (ret < 0) {
+ ret = q6afe_svc_pack_and_set_param_in_band(index, param_hdr,
+ (u8 *) cfg);
+ if (ret < 0)
pr_err("%s: AFE clk cfg failed with ret %d\n",
__func__, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- } else {
- /* set ret to 0 as no timeout happened */
- ret = 0;
- }
- if (atomic_read(&this_afe.status) != 0) {
- pr_err("%s: config cmd failed\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
-fail_cmd:
mutex_unlock(&this_afe.afe_cmd_lock);
return ret;
}
@@ -5861,19 +5501,12 @@ int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg)
int afe_set_lpass_internal_digital_codec_clock(u16 port_id,
struct afe_digital_clk_cfg *cfg)
{
- struct afe_lpass_digital_clk_config_command clk_cfg;
- int index = 0;
+ struct afe_digital_clk_cfg clk_cfg = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
if (!cfg) {
pr_err("%s: clock cfg is NULL\n", __func__);
- ret = -EINVAL;
- return ret;
- }
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
return -EINVAL;
}
ret = q6audio_is_digital_pcm_interface(port_id);
@@ -5883,30 +5516,11 @@ int afe_set_lpass_internal_digital_codec_clock(u16 port_id,
return -EINVAL;
}
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return ret;
- }
-
- clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
- clk_cfg.hdr.src_port = 0;
- clk_cfg.hdr.dest_port = 0;
- clk_cfg.hdr.token = index;
-
- clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- clk_cfg.param.port_id = q6audio_get_port_id(port_id);
- clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
- - sizeof(clk_cfg.param);
- clk_cfg.param.payload_address_lsw = 0x00;
- clk_cfg.param.payload_address_msw = 0x00;
- clk_cfg.param.mem_map_handle = 0x00;
- clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- clk_cfg.pdata.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG;
- clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg);
- clk_cfg.clk_cfg = *cfg;
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_INTERNAL_DIGIATL_CDC_CLK_CONFIG;
+ param_hdr.param_size = sizeof(clk_cfg);
+ clk_cfg = *cfg;
pr_debug("%s: Minor version =0x%x clk val = %d\n"
"clk root = 0x%x resrv = 0x%x port id = 0x%x\n",
@@ -5914,49 +5528,22 @@ int afe_set_lpass_internal_digital_codec_clock(u16 port_id,
cfg->clk_val, cfg->clk_root, cfg->reserved,
q6audio_get_port_id(port_id));
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
- if (ret < 0) {
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &clk_cfg);
+ if (ret < 0)
pr_err("%s: AFE enable for port 0x0x%x ret %d\n",
__func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
-fail_cmd:
return ret;
}
int afe_enable_lpass_core_shared_clock(u16 port_id, u32 enable)
{
- struct afe_lpass_core_shared_clk_config_command clk_cfg;
- int index = 0;
+ struct afe_param_id_lpass_core_shared_clk_cfg clk_cfg = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- index = q6audio_get_port_index(port_id);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- return -EINVAL;
- }
ret = q6audio_is_digital_pcm_interface(port_id);
if (ret < 0) {
pr_err("%s: q6audio_is_digital_pcm_interface fail %d\n",
@@ -5964,65 +5551,25 @@ int afe_enable_lpass_core_shared_clock(u16 port_id, u32 enable)
return -EINVAL;
}
- ret = afe_q6_interface_prepare();
- if (ret != 0) {
- pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
- return ret;
- }
-
mutex_lock(&this_afe.afe_cmd_lock);
- clk_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- clk_cfg.hdr.pkt_size = sizeof(clk_cfg);
- clk_cfg.hdr.src_port = 0;
- clk_cfg.hdr.dest_port = 0;
- clk_cfg.hdr.token = index;
-
- clk_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- clk_cfg.param.port_id = q6audio_get_port_id(port_id);
- clk_cfg.param.payload_size = sizeof(clk_cfg) - sizeof(struct apr_hdr)
- - sizeof(clk_cfg.param);
- clk_cfg.param.payload_address_lsw = 0x00;
- clk_cfg.param.payload_address_msw = 0x00;
- clk_cfg.param.mem_map_handle = 0x00;
- clk_cfg.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- clk_cfg.pdata.param_id = AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG;
- clk_cfg.pdata.param_size = sizeof(clk_cfg.clk_cfg);
- clk_cfg.clk_cfg.lpass_core_shared_clk_cfg_minor_version =
- AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG;
- clk_cfg.clk_cfg.enable = enable;
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_LPASS_CORE_SHARED_CLOCK_CONFIG;
+ param_hdr.param_size = sizeof(clk_cfg);
+ clk_cfg.lpass_core_shared_clk_cfg_minor_version =
+ AFE_API_VERSION_LPASS_CORE_SHARED_CLK_CONFIG;
+ clk_cfg.enable = enable;
pr_debug("%s: port id = %d, enable = %d\n",
__func__, q6audio_get_port_id(port_id), enable);
- atomic_set(&this_afe.state, 1);
- atomic_set(&this_afe.status, 0);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *) &clk_cfg);
- if (ret < 0) {
+ ret = q6afe_pack_and_set_param_in_band(port_id,
+ q6audio_get_port_index(port_id),
+ param_hdr, (u8 *) &clk_cfg);
+ if (ret < 0)
pr_err("%s: AFE enable for port 0x%x ret %d\n",
__func__, port_id, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
- goto fail_cmd;
- }
-fail_cmd:
mutex_unlock(&this_afe.afe_cmd_lock);
return ret;
}
@@ -6052,8 +5599,9 @@ int q6afe_check_osr_clk_freq(u32 freq)
int afe_get_sp_th_vi_ftm_data(struct afe_sp_th_vi_get_param *th_vi)
{
+ struct param_hdr_v3 param_hdr = {0};
+ int port = SLIMBUS_4_TX;
int ret = -EINVAL;
- int index = 0, port = SLIMBUS_4_TX;
if (!th_vi) {
pr_err("%s: Invalid params\n", __func__);
@@ -6062,59 +5610,18 @@ int afe_get_sp_th_vi_ftm_data(struct afe_sp_th_vi_get_param *th_vi)
if (this_afe.vi_tx_port != -1)
port = this_afe.vi_tx_port;
- ret = q6audio_validate_port(port);
- if (ret < 0) {
- pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
- goto done;
- }
- index = q6audio_get_port_index(port);
- if (index < 0) {
- pr_err("%s: invalid port 0x%x, index %d\n",
- __func__, port, index);
- ret = -EINVAL;
- goto done;
- }
- th_vi->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- th_vi->hdr.pkt_size = sizeof(*th_vi);
- th_vi->hdr.src_port = 0;
- th_vi->hdr.dest_port = 0;
- th_vi->hdr.token = index;
- th_vi->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
- th_vi->get_param.mem_map_handle = 0;
- th_vi->get_param.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
- th_vi->get_param.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS;
- th_vi->get_param.payload_address_lsw = 0;
- th_vi->get_param.payload_address_msw = 0;
- th_vi->get_param.payload_size = sizeof(*th_vi)
- - sizeof(th_vi->get_param) - sizeof(th_vi->hdr);
- th_vi->get_param.port_id = q6audio_get_port_id(port);
- th_vi->pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
- th_vi->pdata.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS;
- th_vi->pdata.param_size = sizeof(th_vi->param);
- atomic_set(&this_afe.status, 0);
- atomic_set(&this_afe.state, 1);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *)th_vi);
- if (ret < 0) {
- pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n",
- __func__, port, th_vi->get_param.param_id, ret);
- goto done;
- }
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto done;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(atomic_read(&this_afe.status));
+ param_hdr.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_TH_VI;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_SP_V2_TH_VI_FTM_PARAMS;
+ param_hdr.param_size = sizeof(struct afe_sp_th_vi_ftm_params);
+
+ ret = q6afe_get_params(port, NULL, &param_hdr);
+ if (ret) {
+ pr_err("%s: Failed to get TH VI FTM data\n", __func__);
goto done;
}
+
+ th_vi->pdata = param_hdr;
memcpy(&th_vi->param , &this_afe.th_vi_resp.param,
sizeof(this_afe.th_vi_resp.param));
pr_debug("%s: DC resistance %d %d temp %d %d status %d %d\n",
@@ -6131,8 +5638,9 @@ done:
int afe_get_sp_ex_vi_ftm_data(struct afe_sp_ex_vi_get_param *ex_vi)
{
+ struct param_hdr_v3 param_hdr = {0};
+ int port = SLIMBUS_4_TX;
int ret = -EINVAL;
- int index = 0, port = SLIMBUS_4_TX;
if (!ex_vi) {
pr_err("%s: Invalid params\n", __func__);
@@ -6141,61 +5649,19 @@ int afe_get_sp_ex_vi_ftm_data(struct afe_sp_ex_vi_get_param *ex_vi)
if (this_afe.vi_tx_port != -1)
port = this_afe.vi_tx_port;
- ret = q6audio_validate_port(port);
- if (ret < 0) {
- pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
- goto done;
- }
+ param_hdr.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS;
+ param_hdr.param_size = sizeof(struct afe_sp_ex_vi_ftm_params);
- index = q6audio_get_port_index(port);
- if (index < 0) {
- pr_err("%s: invalid index %d port 0x%x\n", __func__,
- index, port);
- ret = -EINVAL;
- goto done;
- }
-
- ex_vi->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- ex_vi->hdr.pkt_size = sizeof(*ex_vi);
- ex_vi->hdr.src_port = 0;
- ex_vi->hdr.dest_port = 0;
- ex_vi->hdr.token = index;
- ex_vi->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
- ex_vi->get_param.mem_map_handle = 0;
- ex_vi->get_param.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
- ex_vi->get_param.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS;
- ex_vi->get_param.payload_address_lsw = 0;
- ex_vi->get_param.payload_address_msw = 0;
- ex_vi->get_param.payload_size = sizeof(*ex_vi)
- - sizeof(ex_vi->get_param) - sizeof(ex_vi->hdr);
- ex_vi->get_param.port_id = q6audio_get_port_id(port);
- ex_vi->pdata.module_id = AFE_MODULE_SPEAKER_PROTECTION_V2_EX_VI;
- ex_vi->pdata.param_id = AFE_PARAM_ID_SP_V2_EX_VI_FTM_PARAMS;
- ex_vi->pdata.param_size = sizeof(ex_vi->param);
- atomic_set(&this_afe.status, 0);
- atomic_set(&this_afe.state, 1);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *)ex_vi);
+ ret = q6afe_get_params(port, NULL, &param_hdr);
if (ret < 0) {
pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n",
- __func__, port, ex_vi->get_param.param_id, ret);
- goto done;
- }
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto done;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(atomic_read(&this_afe.status));
+ __func__, port, param_hdr.param_id, ret);
goto done;
}
+
+ ex_vi->pdata = param_hdr;
memcpy(&ex_vi->param , &this_afe.ex_vi_resp.param,
sizeof(this_afe.ex_vi_resp.param));
pr_debug("%s: freq %d %d resistance %d %d qfactor %d %d state %d %d\n",
@@ -6215,80 +5681,28 @@ done:
int afe_get_av_dev_drift(struct afe_param_id_dev_timing_stats *timing_stats,
u16 port)
{
+ struct param_hdr_v3 param_hdr = {0};
int ret = -EINVAL;
- int index = 0;
- struct afe_av_dev_drift_get_param av_dev_drift;
if (!timing_stats) {
pr_err("%s: Invalid params\n", __func__);
goto exit;
}
- ret = q6audio_validate_port(port);
- if (ret < 0) {
- pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
- ret = -EINVAL;
- goto exit;
- }
+ param_hdr.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
+ param_hdr.param_size = sizeof(struct afe_param_id_dev_timing_stats);
- index = q6audio_get_port_index(port);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: Invalid AFE port index[%d]\n",
- __func__, index);
- ret = -EINVAL;
- goto exit;
- }
-
- memset(&av_dev_drift, 0, sizeof(struct afe_av_dev_drift_get_param));
-
- av_dev_drift.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- av_dev_drift.hdr.pkt_size = sizeof(av_dev_drift);
- av_dev_drift.hdr.src_port = 0;
- av_dev_drift.hdr.dest_port = 0;
- av_dev_drift.hdr.token = index;
- av_dev_drift.hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
- av_dev_drift.get_param.mem_map_handle = 0;
- av_dev_drift.get_param.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- av_dev_drift.get_param.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
- av_dev_drift.get_param.payload_address_lsw = 0;
- av_dev_drift.get_param.payload_address_msw = 0;
- av_dev_drift.get_param.payload_size = sizeof(av_dev_drift)
- - sizeof(av_dev_drift.get_param) - sizeof(av_dev_drift.hdr);
- av_dev_drift.get_param.port_id = q6audio_get_port_id(port);
- av_dev_drift.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
- av_dev_drift.pdata.param_id = AFE_PARAM_ID_DEV_TIMING_STATS;
- av_dev_drift.pdata.param_size = sizeof(av_dev_drift.timing_stats);
- atomic_set(&this_afe.status, 0);
- atomic_set(&this_afe.state, 1);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *)&av_dev_drift);
+ ret = q6afe_get_params(port, NULL, &param_hdr);
if (ret < 0) {
pr_err("%s: get param port 0x%x param id[0x%x] failed %d\n",
- __func__, port, av_dev_drift.get_param.param_id, ret);
- goto exit;
- }
-
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto exit;
- }
-
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
+ __func__, port, param_hdr.param_id, ret);
goto exit;
}
memcpy(timing_stats, &this_afe.av_dev_drift_resp.timing_stats,
- sizeof(this_afe.av_dev_drift_resp.timing_stats));
+ param_hdr.param_size);
ret = 0;
exit:
return ret;
@@ -6296,8 +5710,9 @@ exit:
int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib_resp)
{
+ struct param_hdr_v3 param_hdr = {0};
+ int port = SLIMBUS_4_TX;
int ret = -EINVAL;
- int index = 0, port = SLIMBUS_4_TX;
if (!calib_resp) {
pr_err("%s: Invalid params\n", __func__);
@@ -6306,60 +5721,15 @@ int afe_spk_prot_get_calib_data(struct afe_spkr_prot_get_vi_calib *calib_resp)
if (this_afe.vi_tx_port != -1)
port = this_afe.vi_tx_port;
- ret = q6audio_validate_port(port);
- if (ret < 0) {
- pr_err("%s: invalid port 0x%x ret %d\n", __func__, port, ret);
- ret = -EINVAL;
- goto fail_cmd;
- }
- index = q6audio_get_port_index(port);
- if (index < 0 || index >= AFE_MAX_PORTS) {
- pr_err("%s: AFE port index[%d] invalid!\n",
- __func__, index);
- ret = -EINVAL;
- goto fail_cmd;
- }
- calib_resp->hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- calib_resp->hdr.pkt_size = sizeof(*calib_resp);
- calib_resp->hdr.src_port = 0;
- calib_resp->hdr.dest_port = 0;
- calib_resp->hdr.token = index;
- calib_resp->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
- calib_resp->get_param.mem_map_handle = 0;
- calib_resp->get_param.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2;
- calib_resp->get_param.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2;
- calib_resp->get_param.payload_address_lsw = 0;
- calib_resp->get_param.payload_address_msw = 0;
- calib_resp->get_param.payload_size = sizeof(*calib_resp)
- - sizeof(calib_resp->get_param) - sizeof(calib_resp->hdr);
- calib_resp->get_param.port_id = q6audio_get_port_id(port);
- calib_resp->pdata.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2;
- calib_resp->pdata.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2;
- calib_resp->pdata.param_size = sizeof(calib_resp->res_cfg);
- atomic_set(&this_afe.status, 0);
- atomic_set(&this_afe.state, 1);
- ret = apr_send_pkt(this_afe.apr, (uint32_t *)calib_resp);
+ param_hdr.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC_V2;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AFE_PARAM_ID_CALIB_RES_CFG_V2;
+ param_hdr.param_size = sizeof(struct afe_spkr_prot_get_vi_calib);
+
+ ret = q6afe_get_params(port, NULL, &param_hdr);
if (ret < 0) {
pr_err("%s: get param port 0x%x param id[0x%x]failed %d\n",
- __func__, port, calib_resp->get_param.param_id, ret);
- goto fail_cmd;
- }
- ret = wait_event_timeout(this_afe.wait[index],
- (atomic_read(&this_afe.state) == 0),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto fail_cmd;
- }
- if (atomic_read(&this_afe.status) > 0) {
- pr_err("%s: config cmd failed [%s]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&this_afe.status)));
- ret = adsp_err_get_lnx_err_code(
- atomic_read(&this_afe.status));
+ __func__, port, param_hdr.param_id, ret);
goto fail_cmd;
}
memcpy(&calib_resp->res_cfg , &this_afe.calib_data.res_cfg,
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index c3d86e6cced2..201234a25bd9 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -41,6 +41,7 @@
#include <sound/audio_cal_utils.h>
#include <sound/adsp_err.h>
#include <sound/compress_params.h>
+#include <sound/q6common.h>
#define TRUE 0x01
#define FALSE 0x00
@@ -92,8 +93,13 @@ struct asm_mmap {
};
static struct asm_mmap this_mmap;
+
+struct audio_session {
+ struct audio_client *ac;
+ spinlock_t session_lock;
+};
/* session id: 0 reserved */
-static struct audio_client *session[ASM_ACTIVE_STREAMS_ALLOWED + 1];
+static struct audio_session session[ASM_ACTIVE_STREAMS_ALLOWED + 1];
struct asm_buffer_node {
struct list_head list;
@@ -545,8 +551,8 @@ static int q6asm_session_alloc(struct audio_client *ac)
{
int n;
for (n = 1; n <= ASM_ACTIVE_STREAMS_ALLOWED; n++) {
- if (!session[n]) {
- session[n] = ac;
+ if (!(session[n].ac)) {
+ session[n].ac = ac;
return n;
}
}
@@ -554,24 +560,39 @@ static int q6asm_session_alloc(struct audio_client *ac)
return -ENOMEM;
}
-static bool q6asm_is_valid_audio_client(struct audio_client *ac)
+static int q6asm_get_session_id_from_audio_client(struct audio_client *ac)
{
int n;
for (n = 1; n <= ASM_ACTIVE_STREAMS_ALLOWED; n++) {
- if (session[n] == ac)
- return 1;
+ if (session[n].ac == ac)
+ return n;
}
return 0;
}
+static bool q6asm_is_valid_audio_client(struct audio_client *ac)
+{
+ return q6asm_get_session_id_from_audio_client(ac) ? 1 : 0;
+}
+
static void q6asm_session_free(struct audio_client *ac)
{
+ int session_id;
+ unsigned long flags;
+
pr_debug("%s: sessionid[%d]\n", __func__, ac->session);
+ session_id = ac->session;
rtac_remove_popp_from_adm_devices(ac->session);
- session[ac->session] = 0;
+ spin_lock_irqsave(&(session[session_id].session_lock), flags);
+ session[ac->session].ac = NULL;
ac->session = 0;
ac->perf_mode = LEGACY_PCM_MODE;
ac->fptr_cache_ops = NULL;
+ ac->cb = NULL;
+ ac->priv = NULL;
+ kfree(ac);
+ ac = NULL;
+ spin_unlock_irqrestore(&(session[session_id].session_lock), flags);
return;
}
@@ -1083,8 +1104,6 @@ void q6asm_audio_client_free(struct audio_client *ac)
pr_debug("%s: APR De-Register\n", __func__);
/*done:*/
- kfree(ac);
- ac = NULL;
mutex_unlock(&session_lock);
return;
@@ -1219,6 +1238,7 @@ struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv)
if (n <= 0) {
pr_err("%s: ASM Session alloc fail n=%d\n", __func__, n);
mutex_unlock(&session_lock);
+ kfree(ac);
goto fail_session;
}
ac->session = n;
@@ -1295,7 +1315,6 @@ fail_apr2:
fail_apr1:
q6asm_session_free(ac);
fail_session:
- kfree(ac);
return NULL;
}
@@ -1310,11 +1329,11 @@ struct audio_client *q6asm_get_audio_client(int session_id)
goto err;
}
- if (!session[session_id]) {
+ if (!(session[session_id].ac)) {
pr_err("%s: session not active: %d\n", __func__, session_id);
goto err;
}
- return session[session_id];
+ return session[session_id].ac;
err:
return NULL;
}
@@ -1518,6 +1537,7 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
uint32_t i = IN;
uint32_t *payload;
unsigned long dsp_flags;
+ unsigned long flags;
struct asm_buffer_node *buf_node = NULL;
struct list_head *ptr, *next;
union asm_token_struct asm_token;
@@ -1525,6 +1545,8 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
struct audio_client *ac = NULL;
struct audio_port_data *port;
+ int session_id;
+
if (!data) {
pr_err("%s: Invalid CB\n", __func__);
return 0;
@@ -1565,13 +1587,23 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
rtac_clear_mapping(ASM_RTAC_CAL);
return 0;
}
+
asm_token.token = data->token;
- ac = q6asm_get_audio_client(asm_token._token.session_id);
+ session_id = asm_token._token.session_id;
+
+ if ((session_id > 0 && session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+ spin_lock_irqsave(&(session[session_id].session_lock), flags);
+
+ ac = q6asm_get_audio_client(session_id);
dir = q6asm_get_flag_from_token(&asm_token, ASM_DIRECTION_OFFSET);
if (!ac) {
pr_debug("%s: session[%d] already freed\n",
- __func__, asm_token._token.session_id);
+ __func__, session_id);
+ if ((session_id > 0 &&
+ session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return 0;
}
@@ -1622,6 +1654,10 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
__func__, payload[0]);
break;
}
+ if ((session_id > 0 &&
+ session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return 0;
}
@@ -1656,6 +1692,10 @@ static int32_t q6asm_srvc_callback(struct apr_client_data *data, void *priv)
if (ac->cb)
ac->cb(data->opcode, data->token,
data->payload, ac->priv);
+ if ((session_id > 0 && session_id <= ASM_ACTIVE_STREAMS_ALLOWED))
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
+
return 0;
}
@@ -1723,6 +1763,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
uint8_t buf_index;
struct msm_adsp_event_data *pp_event_package = NULL;
uint32_t payload_size = 0;
+ unsigned long flags;
+ int session_id;
if (ac == NULL) {
pr_err("%s: ac NULL\n", __func__);
@@ -1732,15 +1774,21 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
pr_err("%s: data NULL\n", __func__);
return -EINVAL;
}
- if (!q6asm_is_valid_audio_client(ac)) {
- pr_err("%s: audio client pointer is invalid, ac = %pK\n",
- __func__, ac);
+
+ session_id = q6asm_get_session_id_from_audio_client(ac);
+ if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
+ pr_err("%s: Session ID is invalid, session = %d\n", __func__,
+ session_id);
return -EINVAL;
}
- if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
- pr_err("%s: Session ID is invalid, session = %d\n", __func__,
- ac->session);
+ spin_lock_irqsave(&(session[session_id].session_lock), flags);
+
+ if (!q6asm_is_valid_audio_client(ac)) {
+ pr_err("%s: audio client pointer is invalid, ac = %pK\n",
+ __func__, ac);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return -EINVAL;
}
@@ -1753,7 +1801,6 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
}
if (data->opcode == RESET_EVENTS) {
- mutex_lock(&ac->cmd_lock);
atomic_set(&ac->reset, 1);
if (ac->apr == NULL) {
ac->apr = ac->apr2;
@@ -1774,7 +1821,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
wake_up(&ac->time_wait);
wake_up(&ac->cmd_wait);
wake_up(&ac->mem_wait);
- mutex_unlock(&ac->cmd_lock);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return 0;
}
@@ -1788,6 +1836,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
(data->opcode != ASM_SESSION_EVENT_RX_UNDERFLOW)) {
if (payload == NULL) {
pr_err("%s: payload is null\n", __func__);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return -EINVAL;
}
dev_vdbg(ac->dev, "%s: Payload = [0x%x] status[0x%x] opcode 0x%x\n",
@@ -1796,6 +1846,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
if (data->opcode == APR_BASIC_RSP_RESULT) {
switch (payload[0]) {
case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
+ case ASM_STREAM_CMD_SET_PP_PARAMS_V3:
if (rtac_make_asm_callback(ac->session, payload,
data->payload_size))
break;
@@ -1813,6 +1864,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
ret = q6asm_is_valid_session(data, priv);
if (ret != 0) {
pr_err("%s: session invalid %d\n", __func__, ret);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return ret;
}
case ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2:
@@ -1842,9 +1895,12 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
__func__, payload[0], payload[1]);
if (wakeup_flag) {
- if ((is_adsp_reg_event(payload[0]) >= 0)
- || (payload[0] ==
- ASM_STREAM_CMD_SET_PP_PARAMS_V2))
+ if ((is_adsp_reg_event(payload[0]) >=
+ 0) ||
+ (payload[0] ==
+ ASM_STREAM_CMD_SET_PP_PARAMS_V2) ||
+ (payload[0] ==
+ ASM_STREAM_CMD_SET_PP_PARAMS_V3))
atomic_set(&ac->cmd_state_pp,
payload[1]);
else
@@ -1852,10 +1908,14 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
payload[1]);
wake_up(&ac->cmd_wait);
}
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock),
+ flags);
return 0;
}
if ((is_adsp_reg_event(payload[0]) >= 0) ||
- (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V2)) {
+ (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V2) ||
+ (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V3)) {
if (atomic_read(&ac->cmd_state_pp) &&
wakeup_flag) {
atomic_set(&ac->cmd_state_pp, 0);
@@ -1882,6 +1942,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
atomic_set(&ac->mem_state, payload[1]);
wake_up(&ac->mem_wait);
}
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock),
+ flags);
return 0;
}
if (atomic_read(&ac->mem_state) && wakeup_flag) {
@@ -1898,10 +1961,10 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
break;
}
case ASM_STREAM_CMD_GET_PP_PARAMS_V2:
- pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS_V2 session %d opcode 0x%x token 0x%x src %d dest %d\n",
- __func__, ac->session,
- data->opcode, data->token,
- data->src_port, data->dest_port);
+ case ASM_STREAM_CMD_GET_PP_PARAMS_V3:
+ pr_debug("%s: ASM_STREAM_CMD_GET_PP_PARAMS session %d opcode 0x%x token 0x%x src %d dest %d\n",
+ __func__, ac->session, data->opcode,
+ data->token, data->src_port, data->dest_port);
/* Should only come here if there is an APR */
/* error or malformed APR packet. Otherwise */
/* response will be returned as */
@@ -1929,6 +1992,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
__func__, payload[0]);
break;
}
+
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return 0;
}
@@ -1942,6 +2008,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
if (port->buf == NULL) {
pr_err("%s: Unexpected Write Done\n",
__func__);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock),
+ flags);
return -EINVAL;
}
spin_lock_irqsave(&port->dsp_lock, dsp_flags);
@@ -1956,6 +2025,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
__func__, payload[0], payload[1]);
spin_unlock_irqrestore(&port->dsp_lock,
dsp_flags);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock),
+ flags);
return -EINVAL;
}
port->buf[buf_index].used = 1;
@@ -1971,13 +2043,13 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
break;
}
case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2:
- pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 session %d opcode 0x%x token 0x%x src %d dest %d\n",
- __func__, ac->session, data->opcode,
- data->token,
- data->src_port, data->dest_port);
+ case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V3:
+ pr_debug("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS session %d opcode 0x%x token 0x%x src %d dest %d\n",
+ __func__, ac->session, data->opcode, data->token,
+ data->src_port, data->dest_port);
if (payload[0] != 0) {
- pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2 returned error = 0x%x\n",
- __func__, payload[0]);
+ pr_err("%s: ASM_STREAM_CMDRSP_GET_PP_PARAMS returned error = 0x%x\n",
+ __func__, payload[0]);
} else if (generic_get_data) {
generic_get_data->valid = 1;
if (generic_get_data->is_inband) {
@@ -2026,6 +2098,9 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
if (ac->io_mode & SYNC_IO_MODE) {
if (port->buf == NULL) {
pr_err("%s: Unexpected Write Done\n", __func__);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock),
+ flags);
return -EINVAL;
}
spin_lock_irqsave(&port->dsp_lock, dsp_flags);
@@ -2100,8 +2175,11 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
pr_debug("%s: ASM_STREAM_EVENT payload[0][0x%x] payload[1][0x%x]",
__func__, payload[0], payload[1]);
i = is_adsp_raise_event(data->opcode);
- if (i < 0)
+ if (i < 0) {
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return 0;
+ }
/* repack payload for asm_stream_pp_event
* package is composed of event type + size + actual payload
@@ -2110,8 +2188,11 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
pp_event_package = kzalloc(payload_size
+ sizeof(struct msm_adsp_event_data),
GFP_ATOMIC);
- if (!pp_event_package)
+ if (!pp_event_package) {
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return -ENOMEM;
+ }
pp_event_package->event_type = i;
pp_event_package->payload_len = payload_size;
@@ -2120,6 +2201,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
ac->cb(data->opcode, data->token,
(void *)pp_event_package, ac->priv);
kfree(pp_event_package);
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return 0;
case ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2:
pr_debug("%s: ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2 sesion %d status 0x%x msw %u lsw %u\n",
@@ -2145,7 +2228,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
if (ac->cb)
ac->cb(data->opcode, data->token,
data->payload, ac->priv);
-
+ spin_unlock_irqrestore(
+ &(session[session_id].session_lock), flags);
return 0;
}
@@ -2321,11 +2405,16 @@ int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac)
static void __q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
uint32_t pkt_size, uint32_t cmd_flg, uint32_t stream_id)
{
+ unsigned long flags;
+
dev_vdbg(ac->dev, "%s: pkt_size=%d cmd_flg=%d session=%d stream_id=%d\n",
__func__, pkt_size, cmd_flg, ac->session, stream_id);
mutex_lock(&ac->cmd_lock);
+ spin_lock_irqsave(&(session[ac->session].session_lock), flags);
if (ac->apr == NULL) {
pr_err("%s: AC APR handle NULL", __func__);
+ spin_unlock_irqrestore(
+ &(session[ac->session].session_lock), flags);
mutex_unlock(&ac->cmd_lock);
return;
}
@@ -2348,6 +2437,8 @@ static void __q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
WAIT_CMD);
hdr->pkt_size = pkt_size;
+ spin_unlock_irqrestore(
+ &(session[ac->session].session_lock), flags);
mutex_unlock(&ac->cmd_lock);
return;
}
@@ -2466,6 +2557,136 @@ static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr,
return;
}
+int q6asm_set_pp_params(struct audio_client *ac,
+ struct mem_mapping_hdr *mem_hdr, u8 *param_data,
+ u32 param_size)
+{
+ struct asm_stream_cmd_set_pp_params *asm_set_param = NULL;
+ int pkt_size = 0;
+ int ret = 0;
+
+ if (ac == NULL) {
+ pr_err("%s: Audio Client is NULL\n", __func__);
+ return -EINVAL;
+ } else if (ac->apr == NULL) {
+ pr_err("%s: APR pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ pkt_size = sizeof(struct asm_stream_cmd_set_pp_params);
+ /* Add param size to packet size when sending in-band only */
+ if (param_data != NULL)
+ pkt_size += param_size;
+ asm_set_param = kzalloc(pkt_size, GFP_KERNEL);
+ if (!asm_set_param)
+ return -ENOMEM;
+
+ q6asm_add_hdr_async(ac, &asm_set_param->apr_hdr, pkt_size, TRUE);
+
+	/* With pre-packed data, only the opcode differs between V2 and V3. */
+ if (q6common_is_instance_id_supported())
+ asm_set_param->apr_hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V3;
+ else
+ asm_set_param->apr_hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+
+ asm_set_param->payload_size = param_size;
+
+ if (mem_hdr != NULL) {
+ /* Out of band case */
+ asm_set_param->mem_hdr = *mem_hdr;
+ } else if (param_data != NULL) {
+ /* In band case. Parameter data must be pre-packed with its
+ * header before calling this function. Use
+ * q6common_pack_pp_params to pack parameter data and header
+ * correctly.
+ */
+ memcpy(&asm_set_param->param_data, param_data, param_size);
+ } else {
+ pr_err("%s: Received NULL pointers for both mem header and param data\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ atomic_set(&ac->cmd_state_pp, -1);
+ ret = apr_send_pkt(ac->apr, (uint32_t *) asm_set_param);
+ if (ret < 0) {
+ pr_err("%s: apr send failed rc %d\n", __func__, ret);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = wait_event_timeout(ac->cmd_wait,
+ atomic_read(&ac->cmd_state_pp) >= 0, 5 * HZ);
+ if (!ret) {
+ pr_err("%s: timeout sending apr pkt\n", __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (atomic_read(&ac->cmd_state_pp) > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(atomic_read(&ac->cmd_state_pp)));
+ ret = adsp_err_get_lnx_err_code(atomic_read(&ac->cmd_state_pp));
+ goto done;
+ }
+ ret = 0;
+done:
+ kfree(asm_set_param);
+ return ret;
+}
+EXPORT_SYMBOL(q6asm_set_pp_params);
+
+int q6asm_pack_and_set_pp_param_in_band(struct audio_client *ac,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data)
+{
+ u8 *packed_data = NULL;
+ u32 packed_size = sizeof(union param_hdrs) + param_hdr.param_size;
+ int ret = 0;
+
+ packed_data = kzalloc(packed_size, GFP_KERNEL);
+ if (packed_data == NULL)
+ return -ENOMEM;
+
+ ret = q6common_pack_pp_params(packed_data, &param_hdr, param_data,
+ &packed_size);
+ if (ret) {
+ pr_err("%s: Failed to pack params, error %d\n", __func__, ret);
+ goto done;
+ }
+
+ ret = q6asm_set_pp_params(ac, NULL, packed_data, packed_size);
+done:
+ kfree(packed_data);
+ return ret;
+}
+EXPORT_SYMBOL(q6asm_pack_and_set_pp_param_in_band);
+
+int q6asm_set_soft_volume_module_instance_ids(int instance,
+ struct param_hdr_v3 *param_hdr)
+{
+ if (param_hdr == NULL) {
+ pr_err("%s: Param header is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (instance) {
+ case SOFT_VOLUME_INSTANCE_2:
+ param_hdr->module_id = ASM_MODULE_ID_VOL_CTRL2;
+ param_hdr->instance_id = INSTANCE_ID_0;
+ return 0;
+ case SOFT_VOLUME_INSTANCE_1:
+ param_hdr->module_id = ASM_MODULE_ID_VOL_CTRL;
+ param_hdr->instance_id = INSTANCE_ID_0;
+ return 0;
+ default:
+ pr_err("%s: Invalid instance %d\n", __func__, instance);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(q6asm_set_soft_volume_module_instance_ids);
+
static int __q6asm_open_read(struct audio_client *ac,
uint32_t format, uint16_t bits_per_sample,
uint32_t pcm_format_block_ver,
@@ -6741,67 +6962,27 @@ fail_cmd:
int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain)
{
struct asm_volume_ctrl_multichannel_gain multi_ch_gain;
- int sz = 0;
+ struct param_hdr_v3 param_info = {0};
int rc = 0;
- if (ac == NULL) {
- pr_err("%s: APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
-
memset(&multi_ch_gain, 0, sizeof(multi_ch_gain));
- sz = sizeof(struct asm_volume_ctrl_multichannel_gain);
- q6asm_add_hdr_async(ac, &multi_ch_gain.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- multi_ch_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- multi_ch_gain.param.data_payload_addr_lsw = 0;
- multi_ch_gain.param.data_payload_addr_msw = 0;
- multi_ch_gain.param.mem_map_handle = 0;
- multi_ch_gain.param.data_payload_size = sizeof(multi_ch_gain) -
- sizeof(multi_ch_gain.hdr) - sizeof(multi_ch_gain.param);
- multi_ch_gain.data.module_id = ASM_MODULE_ID_VOL_CTRL;
- multi_ch_gain.data.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN;
- multi_ch_gain.data.param_size = multi_ch_gain.param.data_payload_size -
- sizeof(multi_ch_gain.data);
- multi_ch_gain.data.reserved = 0;
+
+ param_info.module_id = ASM_MODULE_ID_VOL_CTRL;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN;
+ param_info.param_size = sizeof(multi_ch_gain);
+
multi_ch_gain.gain_data[0].channeltype = PCM_CHANNEL_FL;
multi_ch_gain.gain_data[0].gain = left_gain << 15;
multi_ch_gain.gain_data[1].channeltype = PCM_CHANNEL_FR;
multi_ch_gain.gain_data[1].gain = right_gain << 15;
multi_ch_gain.num_channels = 2;
- rc = apr_send_pkt(ac->apr, (uint32_t *) &multi_ch_gain);
- if (rc < 0) {
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info,
+ (u8 *) &multi_ch_gain);
+ if (rc < 0)
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, multi_ch_gain.data.param_id, rc);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ __func__, param_info.param_id, rc);
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- multi_ch_gain.data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] , set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- multi_ch_gain.data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
return rc;
}
@@ -6817,20 +6998,14 @@ int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels,
uint32_t *gains, uint8_t *ch_map, bool use_default)
{
struct asm_volume_ctrl_multichannel_gain multich_gain;
- int sz = 0;
+ struct param_hdr_v3 param_info = {0};
int rc = 0;
int i;
u8 default_chmap[VOLUME_CONTROL_MAX_CHANNELS];
if (ac == NULL) {
- pr_err("%s: ac is NULL\n", __func__);
- rc = -EINVAL;
- goto done;
- }
- if (ac->apr == NULL) {
- dev_err(ac->dev, "%s: AC APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto done;
+ pr_err("%s: Audio client is NULL\n", __func__);
+ return -EINVAL;
}
if (gains == NULL) {
dev_err(ac->dev, "%s: gain_list is NULL\n", __func__);
@@ -6850,20 +7025,10 @@ int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels,
}
memset(&multich_gain, 0, sizeof(multich_gain));
- sz = sizeof(struct asm_volume_ctrl_multichannel_gain);
- q6asm_add_hdr_async(ac, &multich_gain.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- multich_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- multich_gain.param.data_payload_addr_lsw = 0;
- multich_gain.param.data_payload_addr_msw = 0;
- multich_gain.param.mem_map_handle = 0;
- multich_gain.param.data_payload_size = sizeof(multich_gain) -
- sizeof(multich_gain.hdr) - sizeof(multich_gain.param);
- multich_gain.data.module_id = ASM_MODULE_ID_VOL_CTRL;
- multich_gain.data.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN;
- multich_gain.data.param_size = multich_gain.param.data_payload_size -
- sizeof(multich_gain.data);
- multich_gain.data.reserved = 0;
+ param_info.module_id = ASM_MODULE_ID_VOL_CTRL;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = ASM_PARAM_ID_MULTICHANNEL_GAIN;
+ param_info.param_size = sizeof(multich_gain);
if (use_default) {
rc = q6asm_map_channels(default_chmap, channels, false);
@@ -6882,166 +7047,56 @@ int q6asm_set_multich_gain(struct audio_client *ac, uint32_t channels,
}
multich_gain.num_channels = channels;
- rc = apr_send_pkt(ac->apr, (uint32_t *) &multich_gain);
- if (rc < 0) {
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info,
+ (u8 *) &multich_gain);
+ if (rc)
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, multich_gain.data.param_id, rc);
- goto done;
- }
-
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- multich_gain.data.param_id);
- rc = -EINVAL;
- goto done;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%d] , set-params paramid[0x%x]\n",
- __func__, atomic_read(&ac->cmd_state_pp),
- multich_gain.data.param_id);
- rc = -EINVAL;
- goto done;
- }
- rc = 0;
+ __func__, param_info.param_id, rc);
done:
return rc;
}
int q6asm_set_mute(struct audio_client *ac, int muteflag)
{
- struct asm_volume_ctrl_mute_config mute;
- int sz = 0;
+ struct asm_volume_ctrl_mute_config mute = {0};
+ struct param_hdr_v3 param_info = {0};
int rc = 0;
- if (ac == NULL) {
- pr_err("%s: APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
-
- sz = sizeof(struct asm_volume_ctrl_mute_config);
- q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- mute.param.data_payload_addr_lsw = 0;
- mute.param.data_payload_addr_msw = 0;
- mute.param.mem_map_handle = 0;
- mute.param.data_payload_size = sizeof(mute) -
- sizeof(mute.hdr) - sizeof(mute.param);
- mute.data.module_id = ASM_MODULE_ID_VOL_CTRL;
- mute.data.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG;
- mute.data.param_size = mute.param.data_payload_size - sizeof(mute.data);
- mute.data.reserved = 0;
+ param_info.module_id = ASM_MODULE_ID_VOL_CTRL;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = ASM_PARAM_ID_VOL_CTRL_MUTE_CONFIG;
+ param_info.param_size = sizeof(mute);
mute.mute_flag = muteflag;
- rc = apr_send_pkt(ac->apr, (uint32_t *) &mute);
- if (rc < 0) {
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, (u8 *) &mute);
+ if (rc)
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, mute.data.param_id, rc);
- rc = -EINVAL;
- goto fail_cmd;
- }
-
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- mute.data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- mute.data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
+ __func__, param_info.param_id, rc);
return rc;
}
static int __q6asm_set_volume(struct audio_client *ac, int volume, int instance)
{
- struct asm_volume_ctrl_master_gain vol;
- int sz = 0;
- int rc = 0;
- int module_id;
-
- if (ac == NULL) {
- pr_err("%s: APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ struct asm_volume_ctrl_master_gain vol = {0};
+ struct param_hdr_v3 param_info = {0};
+ int rc = 0;
- switch (instance) {
- case SOFT_VOLUME_INSTANCE_2:
- module_id = ASM_MODULE_ID_VOL_CTRL2;
- break;
- case SOFT_VOLUME_INSTANCE_1:
- default:
- module_id = ASM_MODULE_ID_VOL_CTRL;
- break;
+ rc = q6asm_set_soft_volume_module_instance_ids(instance, &param_info);
+ if (rc) {
+ pr_err("%s: Failed to pack soft volume module and instance IDs, error %d\n",
+ __func__, rc);
+ return rc;
}
- sz = sizeof(struct asm_volume_ctrl_master_gain);
- q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- vol.param.data_payload_addr_lsw = 0;
- vol.param.data_payload_addr_msw = 0;
- vol.param.mem_map_handle = 0;
- vol.param.data_payload_size = sizeof(vol) -
- sizeof(vol.hdr) - sizeof(vol.param);
- vol.data.module_id = module_id;
- vol.data.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
- vol.data.param_size = vol.param.data_payload_size - sizeof(vol.data);
- vol.data.reserved = 0;
+ param_info.param_id = ASM_PARAM_ID_VOL_CTRL_MASTER_GAIN;
+ param_info.param_size = sizeof(vol);
vol.master_gain = volume;
- rc = apr_send_pkt(ac->apr, (uint32_t *) &vol);
- if (rc < 0) {
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, (u8 *) &vol);
+ if (rc)
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, vol.data.param_id, rc);
- rc = -EINVAL;
- goto fail_cmd;
- }
-
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- vol.data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- vol.data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd;
- }
+ __func__, param_info.param_id, rc);
- rc = 0;
-fail_cmd:
return rc;
}
@@ -7240,68 +7295,26 @@ done:
int q6asm_set_softpause(struct audio_client *ac,
struct asm_softpause_params *pause_param)
{
- struct asm_soft_pause_params softpause;
- int sz = 0;
+ struct asm_soft_pause_params softpause = {0};
+ struct param_hdr_v3 param_info = {0};
int rc = 0;
- if (ac == NULL) {
- pr_err("%s: APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ param_info.module_id = ASM_MODULE_ID_VOL_CTRL;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS;
+ param_info.param_size = sizeof(softpause);
- sz = sizeof(struct asm_soft_pause_params);
- q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
-
- softpause.param.data_payload_addr_lsw = 0;
- softpause.param.data_payload_addr_msw = 0;
- softpause.param.mem_map_handle = 0;
- softpause.param.data_payload_size = sizeof(softpause) -
- sizeof(softpause.hdr) - sizeof(softpause.param);
- softpause.data.module_id = ASM_MODULE_ID_VOL_CTRL;
- softpause.data.param_id = ASM_PARAM_ID_SOFT_PAUSE_PARAMETERS;
- softpause.data.param_size = softpause.param.data_payload_size -
- sizeof(softpause.data);
- softpause.data.reserved = 0;
softpause.enable_flag = pause_param->enable;
softpause.period = pause_param->period;
softpause.step = pause_param->step;
softpause.ramping_curve = pause_param->rampingcurve;
- rc = apr_send_pkt(ac->apr, (uint32_t *) &softpause);
- if (rc < 0) {
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info,
+ (u8 *) &softpause);
+ if (rc)
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, softpause.data.param_id, rc);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ __func__, param_info.param_id, rc);
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- softpause.data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- softpause.data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
return rc;
}
@@ -7309,77 +7322,30 @@ static int __q6asm_set_softvolume(struct audio_client *ac,
struct asm_softvolume_params *softvol_param,
int instance)
{
- struct asm_soft_step_volume_params softvol;
- int sz = 0;
- int rc = 0;
- int module_id;
+ struct asm_soft_step_volume_params softvol = {0};
+ struct param_hdr_v3 param_info = {0};
+ int rc = 0;
- if (ac == NULL) {
- pr_err("%s: APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
+ rc = q6asm_set_soft_volume_module_instance_ids(instance, &param_info);
+ if (rc) {
+ pr_err("%s: Failed to pack soft volume module and instance IDs, error %d\n",
+ __func__, rc);
+ return rc;
}
- switch (instance) {
- case SOFT_VOLUME_INSTANCE_2:
- module_id = ASM_MODULE_ID_VOL_CTRL2;
- break;
- case SOFT_VOLUME_INSTANCE_1:
- default:
- module_id = ASM_MODULE_ID_VOL_CTRL;
- break;
- }
+ param_info.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
+ param_info.param_size = sizeof(softvol);
- sz = sizeof(struct asm_soft_step_volume_params);
- q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- softvol.param.data_payload_addr_lsw = 0;
- softvol.param.data_payload_addr_msw = 0;
- softvol.param.mem_map_handle = 0;
- softvol.param.data_payload_size = sizeof(softvol) -
- sizeof(softvol.hdr) - sizeof(softvol.param);
- softvol.data.module_id = module_id;
- softvol.data.param_id = ASM_PARAM_ID_SOFT_VOL_STEPPING_PARAMETERS;
- softvol.data.param_size = softvol.param.data_payload_size -
- sizeof(softvol.data);
- softvol.data.reserved = 0;
softvol.period = softvol_param->period;
softvol.step = softvol_param->step;
softvol.ramping_curve = softvol_param->rampingcurve;
- rc = apr_send_pkt(ac->apr, (uint32_t *) &softvol);
- if (rc < 0) {
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info,
+ (u8 *) &softvol);
+ if (rc)
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, softvol.data.param_id, rc);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ __func__, param_info.param_id, rc);
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- softvol.data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- softvol.data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd;
- }
- rc = 0;
-fail_cmd:
return rc;
}
@@ -7400,334 +7366,156 @@ int q6asm_set_softvolume_v2(struct audio_client *ac,
int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac,
struct asm_stream_pan_ctrl_params *pan_param)
{
- int sz = 0;
- int rc = 0;
+ struct audproc_volume_ctrl_multichannel_gain gain_data;
+ struct param_hdr_v3 param_hdr = {0};
+ int num_out_ch = 0;
int i = 0;
- int32_t ch = 0;
- struct apr_hdr hdr;
- struct audproc_volume_ctrl_channel_type_gain_pair
- gain_data[ASM_MAX_CHANNELS];
- struct asm_stream_cmd_set_pp_params_v2 payload_params;
- struct asm_stream_param_data_v2 data;
- uint16_t *asm_params = NULL;
-
- if (ac == NULL) {
- pr_err("%s: ac is NULL\n", __func__);
- rc = -EINVAL;
- goto fail;
- }
- if (ac->apr == NULL) {
- dev_err(ac->dev, "%s: ac apr handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail;
- }
+ int rc = 0;
- sz = sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2) +
- sizeof(uint32_t) +
- (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
- ASM_MAX_CHANNELS);
- asm_params = kzalloc(sz, GFP_KERNEL);
- if (!asm_params) {
- rc = -ENOMEM;
- goto fail;
+ if (pan_param == NULL) {
+ pr_err("%s: Pan parameter is NULL\n", __func__);
+ return -EINVAL;
}
- q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
+ memset(&gain_data, 0, sizeof(gain_data));
- hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
-
- payload_params.data_payload_addr_lsw = 0;
- payload_params.data_payload_addr_msw = 0;
- payload_params.mem_map_handle = 0;
- payload_params.data_payload_size =
- sizeof(struct asm_stream_param_data_v2) +
- sizeof(uint32_t) +
- (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
- ASM_MAX_CHANNELS);
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)),
- &payload_params,
- sizeof(struct asm_stream_cmd_set_pp_params_v2));
-
- data.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
- data.param_id = AUDPROC_PARAM_ID_MULTICHANNEL_GAIN;
- data.param_size = sizeof(uint32_t) +
- (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) *
- ASM_MAX_CHANNELS);
- data.reserved = 0;
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2)),
- &data, sizeof(struct asm_stream_param_data_v2));
-
- ch = pan_param->num_output_channels;
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2)),
- &ch,
- sizeof(uint32_t));
-
- memset(gain_data, 0,
- ASM_MAX_CHANNELS *
- sizeof(struct audproc_volume_ctrl_channel_type_gain_pair));
- for (i = 0; i < pan_param->num_output_channels; i++) {
- gain_data[i].channel_type =
- pan_param->output_channel_map[i];
- gain_data[i].gain = pan_param->gain[i];
- }
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2) +
- sizeof(uint32_t)),
- gain_data,
- ASM_MAX_CHANNELS *
- sizeof(struct audproc_volume_ctrl_channel_type_gain_pair));
+ param_hdr.module_id = AUDPROC_MODULE_ID_VOL_CTRL;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_MULTICHANNEL_GAIN;
+ param_hdr.param_size = sizeof(gain_data);
- rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
- if (rc < 0) {
- pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, data.param_id, rc);
- goto done;
+ num_out_ch = pan_param->num_output_channels;
+ if (num_out_ch > ASM_MAX_CHANNELS) {
+ pr_err("%s: Invalid number of output channels %d\n", __func__,
+ num_out_ch);
+ return -EINVAL;
}
+ gain_data.num_channels = num_out_ch;
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- data.param_id);
- rc = -EINVAL;
- goto done;
+ for (i = 0; i < num_out_ch; i++) {
+ gain_data.gain_data[i].channel_type =
+ pan_param->output_channel_map[i];
+ gain_data.gain_data[i].gain = pan_param->gain[i];
}
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%d], set-params paramid[0x%x]\n",
- __func__, atomic_read(&ac->cmd_state_pp),
- data.param_id);
- rc = -EINVAL;
- goto done;
- }
- rc = 0;
-done:
- kfree(asm_params);
-fail:
+
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr,
+ (uint8_t *) &gain_data);
+ if (rc < 0)
+ pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+ __func__, param_hdr.param_id, rc);
return rc;
}
int q6asm_set_mfc_panning_params(struct audio_client *ac,
struct asm_stream_pan_ctrl_params *pan_param)
{
- int sz, rc, i;
- struct audproc_mfc_output_media_fmt mfc_cfg;
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 payload_params;
- struct asm_stream_param_data_v2 data;
- struct audproc_chmixer_param_coeff pan_cfg;
- uint16_t variable_payload = 0;
- char *asm_params = NULL;
- uint16_t ii;
- uint16_t *dst_gain_ptr = NULL;
- sz = rc = i = 0;
- if (ac == NULL) {
- pr_err("%s: ac handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd1;
- }
- if (ac->apr == NULL) {
- pr_err("%s: ac apr handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd1;
- }
+ struct audproc_mfc_param_media_fmt mfc_cfg = {0};
+ struct audproc_chmixer_param_coeff *chmixer_cfg;
+ struct param_hdr_v3 param_hdr = {0};
+ u16 *dst_gain_ptr = NULL;
+ int num_out_ch;
+ int num_in_ch;
+ int chmixer_cfg_size = 0;
+ int packed_data_size = 0;
+ int out_ch_map_size;
+ int in_ch_map_size;
+ int gain_size;
+ int i = 0;
+ int rc = 0;
+
+ if (!pan_param)
+ return -EINVAL;
+
+ num_out_ch = pan_param->num_output_channels;
+ num_in_ch = pan_param->num_input_channels;
+
+ param_hdr.module_id = AUDPROC_MODULE_ID_MFC;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+ param_hdr.param_size = sizeof(mfc_cfg);
- sz = sizeof(struct audproc_mfc_output_media_fmt);
- q6asm_add_hdr_async(ac, &mfc_cfg.params.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- mfc_cfg.params.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- mfc_cfg.params.payload_addr_lsw = 0;
- mfc_cfg.params.payload_addr_msw = 0;
- mfc_cfg.params.mem_map_handle = 0;
- mfc_cfg.params.payload_size = sizeof(mfc_cfg) - sizeof(mfc_cfg.params);
- mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
- mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
- mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
- sizeof(mfc_cfg.data);
- mfc_cfg.data.reserved = 0;
mfc_cfg.sampling_rate = 0;
mfc_cfg.bits_per_sample = 0;
- mfc_cfg.num_channels = pan_param->num_output_channels;
- for (i = 0; i < mfc_cfg.num_channels; i++)
+ mfc_cfg.num_channels = num_out_ch;
+ for (i = 0; i < num_out_ch; i++)
mfc_cfg.channel_type[i] = pan_param->output_channel_map[i];
- rc = apr_send_pkt(ac->apr, (uint32_t *) &mfc_cfg);
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr,
+ (uint8_t *) &mfc_cfg);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, mfc_cfg.data.param_id, rc);
- rc = -EINVAL;
- goto fail_cmd1;
+ __func__, param_hdr.param_id, rc);
+ return rc;
}
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- mfc_cfg.data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd1;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- mfc_cfg.data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd1;
- }
+ out_ch_map_size = num_out_ch * sizeof(uint16_t);
+ in_ch_map_size = num_in_ch * sizeof(uint16_t);
+ gain_size = num_out_ch * num_in_ch * sizeof(uint16_t);
- variable_payload = pan_param->num_output_channels * sizeof(uint16_t)+
- pan_param->num_input_channels * sizeof(uint16_t) +
- pan_param->num_output_channels *
- pan_param->num_input_channels * sizeof(uint16_t);
- i = (variable_payload % sizeof(uint32_t));
- variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i;
- sz = sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2) +
- sizeof(struct audproc_chmixer_param_coeff) +
- variable_payload;
+ chmixer_cfg_size = sizeof(struct audproc_chmixer_param_coeff) +
+ out_ch_map_size + in_ch_map_size + gain_size;
+ chmixer_cfg = kzalloc(chmixer_cfg_size, GFP_KERNEL);
+ if (!chmixer_cfg)
+ return -ENOMEM;
- asm_params = kzalloc(sz, GFP_KERNEL);
- if (!asm_params) {
- rc = -ENOMEM;
- goto fail_cmd1;
- }
+ param_hdr.module_id = AUDPROC_MODULE_ID_MFC;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF;
+ param_hdr.param_size = chmixer_cfg_size;
- q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
-
- payload_params.data_payload_addr_lsw = 0;
- payload_params.data_payload_addr_msw = 0;
- payload_params.mem_map_handle = 0;
- payload_params.data_payload_size =
- sizeof(struct audproc_chmixer_param_coeff) +
- variable_payload + sizeof(struct asm_stream_param_data_v2);
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)),
- &payload_params,
- sizeof(struct asm_stream_cmd_set_pp_params_v2));
-
- data.module_id = AUDPROC_MODULE_ID_MFC;
- data.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF;
- data.param_size = sizeof(struct audproc_chmixer_param_coeff) +
- variable_payload;
- data.reserved = 0;
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2)),
- &data, sizeof(struct asm_stream_param_data_v2));
-
- pan_cfg.index = 0;
- pan_cfg.num_output_channels = pan_param->num_output_channels;
- pan_cfg.num_input_channels = pan_param->num_input_channels;
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2)),
- &pan_cfg, sizeof(struct audproc_chmixer_param_coeff));
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2) +
- sizeof(struct audproc_chmixer_param_coeff)),
- pan_param->output_channel_map,
- pan_param->num_output_channels * sizeof(uint16_t));
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2) +
- sizeof(struct audproc_chmixer_param_coeff) +
- pan_param->num_output_channels * sizeof(uint16_t)),
- pan_param->input_channel_map,
- pan_param->num_input_channels * sizeof(uint16_t));
-
- dst_gain_ptr = (uint16_t *) ((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- sizeof(struct asm_stream_param_data_v2) +
- sizeof(struct audproc_chmixer_param_coeff) +
- (pan_param->num_output_channels * sizeof(uint16_t)) +
- (pan_param->num_input_channels * sizeof(uint16_t)));
- for (ii = 0; ii < pan_param->num_output_channels *
- pan_param->num_input_channels; ii++)
- dst_gain_ptr[ii] = (uint16_t) pan_param->gain[ii];
+ chmixer_cfg->index = 0;
+ chmixer_cfg->num_output_channels = num_out_ch;
+ chmixer_cfg->num_input_channels = num_in_ch;
- rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+ /* Repack channel data as max channels may not be used */
+ memcpy(chmixer_cfg->payload, pan_param->output_channel_map,
+ out_ch_map_size);
+ packed_data_size += out_ch_map_size;
+
+ memcpy(chmixer_cfg->payload + packed_data_size,
+ pan_param->input_channel_map, in_ch_map_size);
+ packed_data_size += in_ch_map_size;
+
+ dst_gain_ptr = (uint16_t *) chmixer_cfg->payload + packed_data_size;
+ for (i = 0; i < num_out_ch * num_in_ch; i++)
+ dst_gain_ptr[i] = (uint16_t) pan_param->gain[i];
+
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_hdr,
+ (uint8_t *) chmixer_cfg);
if (rc < 0) {
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, data.param_id, rc);
+ __func__, param_hdr.param_id, rc);
rc = -EINVAL;
- goto fail_cmd2;
}
+ kfree(chmixer_cfg);
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd2;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd2;
- }
- rc = 0;
-fail_cmd2:
- kfree(asm_params);
-fail_cmd1:
return rc;
}
int q6asm_equalizer(struct audio_client *ac, void *eq_p)
{
- struct asm_eq_params eq;
+ struct asm_eq_params eq = {0};
struct msm_audio_eq_stream_config *eq_params = NULL;
+ struct param_hdr_v3 param_info = {0};
int i = 0;
- int sz = 0;
int rc = 0;
if (ac == NULL) {
- pr_err("%s: APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- rc = -EINVAL;
- goto fail_cmd;
+ pr_err("%s: Audio client is NULL\n", __func__);
+ return -EINVAL;
}
-
if (eq_p == NULL) {
pr_err("%s: [%d]: Invalid Eq param\n", __func__, ac->session);
rc = -EINVAL;
goto fail_cmd;
}
- sz = sizeof(struct asm_eq_params);
- eq_params = (struct msm_audio_eq_stream_config *) eq_p;
- q6asm_add_hdr(ac, &eq.hdr, sz, TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- eq.param.data_payload_addr_lsw = 0;
- eq.param.data_payload_addr_msw = 0;
- eq.param.mem_map_handle = 0;
- eq.param.data_payload_size = sizeof(eq) -
- sizeof(eq.hdr) - sizeof(eq.param);
- eq.data.module_id = ASM_MODULE_ID_EQUALIZER;
- eq.data.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS;
- eq.data.param_size = eq.param.data_payload_size - sizeof(eq.data);
+ eq_params = (struct msm_audio_eq_stream_config *) eq_p;
+ param_info.module_id = ASM_MODULE_ID_EQUALIZER;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = ASM_PARAM_ID_EQUALIZER_PARAMETERS;
+ param_info.param_size = sizeof(eq);
eq.enable_flag = eq_params->enable;
eq.num_bands = eq_params->num_bands;
@@ -7753,32 +7541,11 @@ int q6asm_equalizer(struct audio_client *ac, void *eq_p)
pr_debug("%s: q_factor:%d bandnum:%d\n", __func__,
eq_params->eq_bands[i].q_factor, i);
}
- rc = apr_send_pkt(ac->apr, (uint32_t *)&eq);
- if (rc < 0) {
+ rc = q6asm_pack_and_set_pp_param_in_band(ac, param_info, (u8 *) &eq);
+ if (rc)
pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
- __func__, eq.data.param_id, rc);
- rc = -EINVAL;
- goto fail_cmd;
- }
+ __func__, param_info.param_id, rc);
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
- if (!rc) {
- pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
- eq.data.param_id);
- rc = -ETIMEDOUT;
- goto fail_cmd;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)),
- eq.data.param_id);
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_cmd;
- }
- rc = 0;
fail_cmd:
return rc;
}
@@ -8293,7 +8060,7 @@ int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp)
mtmx_params.param_info.param_id =
ASM_SESSION_MTMX_STRTR_PARAM_SESSION_TIME_V3;
mtmx_params.param_info.param_max_size =
- sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct param_hdr_v1) +
sizeof(struct asm_session_mtmx_strtr_param_session_time_v3_t);
atomic_set(&ac->time_flag, 1);
@@ -8366,79 +8133,6 @@ fail_cmd:
return -EINVAL;
}
-
-int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
- uint32_t params_length)
-{
- char *asm_params = NULL;
- struct apr_hdr hdr;
- struct asm_stream_cmd_set_pp_params_v2 payload_params;
- int sz, rc;
-
- pr_debug("%s:\n", __func__);
- if (!ac) {
- pr_err("%s: APR handle NULL\n", __func__);
- return -EINVAL;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- return -EINVAL;
- }
- if (params == NULL) {
- pr_err("%s: params NULL\n", __func__);
- return -EINVAL;
- }
- sz = sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- params_length;
- asm_params = kzalloc(sz, GFP_KERNEL);
- if (!asm_params) {
- pr_err("%s, asm params memory alloc failed", __func__);
- return -ENOMEM;
- }
- q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2) +
- params_length), TRUE);
- atomic_set(&ac->cmd_state_pp, -1);
- hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- payload_params.data_payload_addr_lsw = 0;
- payload_params.data_payload_addr_msw = 0;
- payload_params.mem_map_handle = 0;
- payload_params.data_payload_size = params_length;
- memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), &payload_params,
- sizeof(struct asm_stream_cmd_set_pp_params_v2));
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2)),
- params, params_length);
- rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
- if (rc < 0) {
- pr_err("%s: audio effects set-params send failed\n", __func__);
- rc = -EINVAL;
- goto fail_send_param;
- }
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 1*HZ);
- if (!rc) {
- pr_err("%s: timeout, audio effects set-params\n", __func__);
- rc = -ETIMEDOUT;
- goto fail_send_param;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%s] set-params\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state_pp)));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state_pp));
- goto fail_send_param;
- }
-
- rc = 0;
-fail_send_param:
- kfree(asm_params);
- return rc;
-}
-
int q6asm_send_mtmx_strtr_window(struct audio_client *ac,
struct asm_session_mtmx_strtr_param_window_v2_t *window_param,
uint32_t param_id)
@@ -8471,7 +8165,7 @@ int q6asm_send_mtmx_strtr_window(struct audio_client *ac,
matrix.param.data_payload_addr_msw = 0;
matrix.param.mem_map_handle = 0;
matrix.param.data_payload_size =
- sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct param_hdr_v1) +
sizeof(struct asm_session_mtmx_strtr_param_window_v2_t);
matrix.param.direction = 0; /* RX */
matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
@@ -8556,7 +8250,7 @@ int q6asm_send_mtmx_strtr_render_mode(struct audio_client *ac,
matrix.param.data_payload_addr_msw = 0;
matrix.param.mem_map_handle = 0;
matrix.param.data_payload_size =
- sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct param_hdr_v1) +
sizeof(struct asm_session_mtmx_strtr_param_render_mode_t);
matrix.param.direction = 0; /* RX */
matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
@@ -8641,7 +8335,7 @@ int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
matrix.param.data_payload_addr_msw = 0;
matrix.param.mem_map_handle = 0;
matrix.param.data_payload_size =
- sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct param_hdr_v1) +
sizeof(struct asm_session_mtmx_strtr_param_clk_rec_t);
matrix.param.direction = 0; /* RX */
matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
@@ -8716,7 +8410,7 @@ int q6asm_send_mtmx_strtr_enable_adjust_session_clock(struct audio_client *ac,
matrix.param.data_payload_addr_msw = 0;
matrix.param.mem_map_handle = 0;
matrix.param.data_payload_size =
- sizeof(struct asm_stream_param_data_v2) +
+ sizeof(struct param_hdr_v1) +
sizeof(struct asm_session_mtmx_param_adjust_session_time_ctl_t);
matrix.param.direction = 0; /* RX */
matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
@@ -9266,7 +8960,7 @@ int q6asm_get_apr_service_id(int session_id)
return -EINVAL;
}
- return ((struct apr_svc *)session[session_id]->apr)->id;
+ return ((struct apr_svc *)(session[session_id].ac)->apr)->id;
}
int q6asm_get_asm_topology(int session_id)
@@ -9277,12 +8971,12 @@ int q6asm_get_asm_topology(int session_id)
pr_err("%s: invalid session_id = %d\n", __func__, session_id);
goto done;
}
- if (session[session_id] == NULL) {
+ if (session[session_id].ac == NULL) {
pr_err("%s: session not created for session id = %d\n",
__func__, session_id);
goto done;
}
- topology = session[session_id]->topology;
+ topology = (session[session_id].ac)->topology;
done:
return topology;
}
@@ -9295,12 +8989,12 @@ int q6asm_get_asm_app_type(int session_id)
pr_err("%s: invalid session_id = %d\n", __func__, session_id);
goto done;
}
- if (session[session_id] == NULL) {
+ if (session[session_id].ac == NULL) {
pr_err("%s: session not created for session id = %d\n",
__func__, session_id);
goto done;
}
- app_type = session[session_id]->app_type;
+ app_type = (session[session_id].ac)->app_type;
done:
return app_type;
}
@@ -9355,19 +9049,14 @@ done:
int q6asm_send_cal(struct audio_client *ac)
{
struct cal_block_data *cal_block = NULL;
- struct apr_hdr hdr;
- char *asm_params = NULL;
- struct asm_stream_cmd_set_pp_params_v2 payload_params;
- int sz, rc = -EINVAL;
+ struct mem_mapping_hdr mem_hdr = {0};
+ u32 payload_size = 0;
+ int rc = -EINVAL;
pr_debug("%s:\n", __func__);
if (!ac) {
- pr_err("%s: APR handle NULL\n", __func__);
- goto done;
- }
- if (ac->apr == NULL) {
- pr_err("%s: AC APR handle NULL\n", __func__);
- goto done;
+ pr_err("%s: Audio client is NULL\n", __func__);
+ return -EINVAL;
}
if (ac->io_mode & NT_MODE) {
pr_debug("%s: called for NT MODE, exiting\n", __func__);
@@ -9404,62 +9093,26 @@ int q6asm_send_cal(struct audio_client *ac)
goto unlock;
}
- sz = sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2);
- asm_params = kzalloc(sz, GFP_KERNEL);
- if (!asm_params) {
- pr_err("%s, asm params memory alloc failed", __func__);
- rc = -ENOMEM;
- goto unlock;
- }
-
- /* asm_stream_cmd_set_pp_params_v2 has no APR header in it */
- q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
- sizeof(struct asm_stream_cmd_set_pp_params_v2)), TRUE);
-
- atomic_set(&ac->cmd_state_pp, -1);
- hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
- payload_params.data_payload_addr_lsw =
- lower_32_bits(cal_block->cal_data.paddr);
- payload_params.data_payload_addr_msw =
- msm_audio_populate_upper_32_bits(
- cal_block->cal_data.paddr);
- payload_params.mem_map_handle = cal_block->map_data.q6map_handle;
- payload_params.data_payload_size = cal_block->cal_data.size;
- memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr));
- memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), &payload_params,
- sizeof(struct asm_stream_cmd_set_pp_params_v2));
+ mem_hdr.data_payload_addr_lsw =
+ lower_32_bits(cal_block->cal_data.paddr);
+ mem_hdr.data_payload_addr_msw =
+ msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
+ mem_hdr.mem_map_handle = cal_block->map_data.q6map_handle;
+ payload_size = cal_block->cal_data.size;
pr_debug("%s: phyaddr lsw = %x msw = %x, maphdl = %x calsize = %d\n",
- __func__, payload_params.data_payload_addr_lsw,
- payload_params.data_payload_addr_msw,
- payload_params.mem_map_handle,
- payload_params.data_payload_size);
+ __func__, mem_hdr.data_payload_addr_lsw,
+ mem_hdr.data_payload_addr_msw, mem_hdr.mem_map_handle,
+ payload_size);
- rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
- if (rc < 0) {
+ rc = q6asm_set_pp_params(ac, &mem_hdr, NULL, payload_size);
+ if (rc) {
pr_err("%s: audio audstrm cal send failed\n", __func__);
- rc = -EINVAL;
- goto free;
- }
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ);
- if (!rc) {
- pr_err("%s: timeout, audio audstrm cal send\n", __func__);
- rc = -ETIMEDOUT;
- goto free;
- }
- if (atomic_read(&ac->cmd_state_pp) > 0) {
- pr_err("%s: DSP returned error[%d] audio audstrm cal send\n",
- __func__, atomic_read(&ac->cmd_state_pp));
- rc = -EINVAL;
- goto free;
+ goto unlock;
}
rc = 0;
-free:
- kfree(asm_params);
unlock:
mutex_unlock(&cal_data[ASM_AUDSTRM_CAL]->lock);
done:
@@ -9643,7 +9296,10 @@ static int __init q6asm_init(void)
int lcnt, ret;
pr_debug("%s:\n", __func__);
- memset(session, 0, sizeof(session));
+ memset(session, 0, sizeof(struct audio_session) *
+ (ASM_ACTIVE_STREAMS_ALLOWED + 1));
+ for (lcnt = 0; lcnt <= ASM_ACTIVE_STREAMS_ALLOWED; lcnt++)
+ spin_lock_init(&(session[lcnt].session_lock));
set_custom_topology = 1;
/*setup common client used for cal mem map */
diff --git a/sound/soc/msm/qdsp6v2/q6common.c b/sound/soc/msm/qdsp6v2/q6common.c
new file mode 100644
index 000000000000..88e9af1cb86b
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6common.c
@@ -0,0 +1,85 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <sound/q6common.h>
+
+struct q6common_ctl {
+ bool instance_id_supported;
+};
+
+static struct q6common_ctl common;
+
+void q6common_update_instance_id_support(bool supported)
+{
+ common.instance_id_supported = supported;
+}
+EXPORT_SYMBOL(q6common_update_instance_id_support);
+
+bool q6common_is_instance_id_supported(void)
+{
+ return common.instance_id_supported;
+}
+EXPORT_SYMBOL(q6common_is_instance_id_supported);
+
+int q6common_pack_pp_params(u8 *dest, struct param_hdr_v3 *v3_hdr,
+ u8 *param_data, u32 *total_size)
+{
+ struct param_hdr_v1 *v1_hdr = NULL;
+ u32 packed_size = 0;
+ u32 param_size = 0;
+ bool iid_supported = q6common_is_instance_id_supported();
+
+ if (dest == NULL) {
+ pr_err("%s: Received NULL pointer for destination\n", __func__);
+ return -EINVAL;
+ } else if (v3_hdr == NULL) {
+ pr_err("%s: Received NULL pointer for header\n", __func__);
+ return -EINVAL;
+ } else if (total_size == NULL) {
+ pr_err("%s: Received NULL pointer for total size\n", __func__);
+ return -EINVAL;
+ }
+
+ param_size = v3_hdr->param_size;
+ packed_size = iid_supported ? sizeof(struct param_hdr_v3) :
+ sizeof(struct param_hdr_v1);
+
+ if (iid_supported) {
+ memcpy(dest, v3_hdr, packed_size);
+ } else {
+ v1_hdr = (struct param_hdr_v1 *) dest;
+ v1_hdr->module_id = v3_hdr->module_id;
+ v1_hdr->param_id = v3_hdr->param_id;
+
+ if (param_size > U16_MAX) {
+ pr_err("%s: Invalid param size for V1 %d\n", __func__,
+ param_size);
+ return -EINVAL;
+ }
+ v1_hdr->param_size = param_size;
+ v1_hdr->reserved = 0;
+ }
+
+ /*
+ * Make param_data optional for cases when there is no data
+ * present as in some set cases and all get cases.
+ */
+ if (param_data != NULL) {
+ memcpy(dest + packed_size, param_data, param_size);
+ packed_size += param_size;
+ }
+
+ *total_size = packed_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(q6common_pack_pp_params);
diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c
index 11574a874a5a..1161bb31c434 100644
--- a/sound/soc/msm/qdsp6v2/q6lsm.c
+++ b/sound/soc/msm/qdsp6v2/q6lsm.c
@@ -26,6 +26,7 @@
#include <sound/apr_audio-v2.h>
#include <sound/lsm_params.h>
#include <sound/q6core.h>
+#include <sound/q6common.h>
#include <sound/q6lsm.h>
#include <asm/ioctls.h>
#include <linux/memory.h>
@@ -73,11 +74,6 @@ struct lsm_common {
struct mutex apr_lock;
};
-struct lsm_module_param_ids {
- uint32_t module_id;
- uint32_t param_id;
-};
-
static struct lsm_common lsm_common;
/*
* mmap_handle_p can point either client->sound_model.mem_map_handle or
@@ -98,38 +94,6 @@ static int q6lsm_memory_map_regions(struct lsm_client *client,
static int q6lsm_memory_unmap_regions(struct lsm_client *client,
uint32_t handle);
-static void q6lsm_set_param_hdr_info(
- struct lsm_set_params_hdr *param_hdr,
- u32 payload_size, u32 addr_lsw, u32 addr_msw,
- u32 mmap_handle)
-{
- param_hdr->data_payload_size = payload_size;
- param_hdr->data_payload_addr_lsw = addr_lsw;
- param_hdr->data_payload_addr_msw = addr_msw;
- param_hdr->mem_map_handle = mmap_handle;
-}
-
-static void q6lsm_set_param_common(
- struct lsm_param_payload_common *common,
- struct lsm_module_param_ids *ids,
- u32 param_size, u32 set_param_version)
-{
- common->module_id = ids->module_id;
- common->param_id = ids->param_id;
-
- switch (set_param_version) {
- case LSM_SESSION_CMD_SET_PARAMS_V2:
- common->p_size.param_size = param_size;
- break;
- case LSM_SESSION_CMD_SET_PARAMS:
- default:
- common->p_size.sr.param_size =
- (u16) param_size;
- common->p_size.sr.reserved = 0;
- break;
- }
-}
-
static int q6lsm_callback(struct apr_client_data *data, void *priv)
{
struct lsm_client *client = (struct lsm_client *)priv;
@@ -199,6 +163,7 @@ static int q6lsm_callback(struct apr_client_data *data, void *priv)
case LSM_SESSION_CMD_OPEN_TX_V2:
case LSM_CMD_ADD_TOPOLOGIES:
case LSM_SESSION_CMD_SET_PARAMS_V2:
+ case LSM_SESSION_CMD_SET_PARAMS_V3:
if (token != client->session &&
payload[0] !=
LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL) {
@@ -433,6 +398,189 @@ static void q6lsm_add_hdr(struct lsm_client *client, struct apr_hdr *hdr,
hdr->token = client->session;
}
+/*
+ * LSM still supports 3 versions of commands so it cannot use the common
+ * Q6Common packing function. No need to check parameter pointers as it
+ * is static and should only be called internally.
+ */
+static int q6lsm_pack_params(u8 *dest, struct param_hdr_v3 *param_info,
+ u8 *param_data, size_t *final_length,
+ u32 set_param_opcode)
+{
+ bool iid_supported = q6common_is_instance_id_supported();
+ union param_hdrs *param_hdr = NULL;
+ u32 param_size = param_info->param_size;
+ size_t hdr_size;
+ size_t provided_size = *final_length;
+
+ hdr_size = iid_supported ? sizeof(struct param_hdr_v3) :
+ sizeof(struct param_hdr_v2);
+ if (provided_size < hdr_size) {
+ pr_err("%s: Provided size %zu is not large enough, need %zu\n",
+ __func__, provided_size, hdr_size);
+ return -EINVAL;
+ }
+
+ if (iid_supported) {
+ memcpy(dest, param_info, hdr_size);
+ } else {
+ /* MID, PID and structure size are the same in V1 and V2 */
+ param_hdr = (union param_hdrs *) dest;
+ param_hdr->v2.module_id = param_info->module_id;
+ param_hdr->v2.param_id = param_info->param_id;
+
+ switch (set_param_opcode) {
+ case LSM_SESSION_CMD_SET_PARAMS_V2:
+ param_hdr->v2.param_size = param_size;
+ break;
+ case LSM_SESSION_CMD_SET_PARAMS:
+ default:
+ if (param_size > U16_MAX) {
+ pr_err("%s: Invalid param size %d\n", __func__,
+ param_size);
+ return -EINVAL;
+ }
+
+ param_hdr->v1.param_size = param_size;
+ param_hdr->v1.reserved = 0;
+ break;
+ }
+ }
+
+ *final_length = hdr_size;
+
+ if (param_data != NULL) {
+ if (provided_size < hdr_size + param_size) {
+ pr_err("%s: Provided size %zu is not large enough, need %zu\n",
+ __func__, provided_size, hdr_size + param_size);
+ return -EINVAL;
+ }
+ memcpy(dest + hdr_size, param_data, param_size);
+ *final_length += param_size;
+ }
+ return 0;
+}
+
+static int q6lsm_set_params_v2(struct lsm_client *client,
+ struct mem_mapping_hdr *mem_hdr,
+ uint8_t *param_data, uint32_t param_size,
+ uint32_t set_param_opcode)
+{
+ struct lsm_session_cmd_set_params_v2 *lsm_set_param = NULL;
+ uint32_t pkt_size = 0;
+ int ret;
+
+ pkt_size = sizeof(struct lsm_session_cmd_set_params_v2);
+ /* Only include param size in packet size when inband */
+ if (param_data != NULL)
+ pkt_size += param_size;
+
+ lsm_set_param = kzalloc(pkt_size, GFP_KERNEL);
+ if (!lsm_set_param)
+ return -ENOMEM;
+
+ q6lsm_add_hdr(client, &lsm_set_param->apr_hdr, pkt_size, true);
+ lsm_set_param->apr_hdr.opcode = set_param_opcode;
+ lsm_set_param->payload_size = param_size;
+
+ if (mem_hdr != NULL) {
+ lsm_set_param->mem_hdr = *mem_hdr;
+ } else if (param_data != NULL) {
+ memcpy(lsm_set_param->param_data, param_data, param_size);
+ } else {
+ pr_err("%s: Received NULL pointers for both memory header and data\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = q6lsm_apr_send_pkt(client, client->apr, lsm_set_param, true,
+ NULL);
+done:
+ kfree(lsm_set_param);
+ return ret;
+}
+
+static int q6lsm_set_params_v3(struct lsm_client *client,
+ struct mem_mapping_hdr *mem_hdr,
+ uint8_t *param_data, uint32_t param_size)
+{
+ struct lsm_session_cmd_set_params_v3 *lsm_set_param = NULL;
+ uint16_t pkt_size = 0;
+ int ret = 0;
+
+ pkt_size = sizeof(struct lsm_session_cmd_set_params_v3);
+ /* Only include param size in packet size when inband */
+ if (param_data != NULL)
+ pkt_size += param_size;
+
+ lsm_set_param = kzalloc(pkt_size, GFP_KERNEL);
+ if (!lsm_set_param)
+ return -ENOMEM;
+
+ q6lsm_add_hdr(client, &lsm_set_param->apr_hdr, pkt_size, true);
+ lsm_set_param->apr_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V3;
+ lsm_set_param->payload_size = param_size;
+
+ if (mem_hdr != NULL) {
+ lsm_set_param->mem_hdr = *mem_hdr;
+ } else if (param_data != NULL) {
+ memcpy(lsm_set_param->param_data, param_data, param_size);
+ } else {
+ pr_err("%s: Received NULL pointers for both memory header and data\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = q6lsm_apr_send_pkt(client, client->apr, lsm_set_param, true,
+ NULL);
+done:
+ kfree(lsm_set_param);
+ return ret;
+}
+
+static int q6lsm_set_params(struct lsm_client *client,
+ struct mem_mapping_hdr *mem_hdr,
+ uint8_t *param_data, uint32_t param_size,
+ uint32_t set_param_opcode)
+
+{
+ if (q6common_is_instance_id_supported())
+ return q6lsm_set_params_v3(client, mem_hdr, param_data,
+ param_size);
+ else
+ return q6lsm_set_params_v2(client, mem_hdr, param_data,
+ param_size, set_param_opcode);
+}
+
+static int q6lsm_pack_and_set_params(struct lsm_client *client,
+ struct param_hdr_v3 *param_info,
+ uint8_t *param_data,
+ uint32_t set_param_opcode)
+
+{
+ u8 *packed_data = NULL;
+ size_t total_size = 0;
+ int ret = 0;
+
+ total_size = sizeof(union param_hdrs) + param_info->param_size;
+ packed_data = kzalloc(total_size, GFP_KERNEL);
+ if (!packed_data)
+ return -ENOMEM;
+
+ ret = q6lsm_pack_params(packed_data, param_info, param_data,
+ &total_size, set_param_opcode);
+ if (ret)
+ goto done;
+
+ ret = q6lsm_set_params(client, NULL, packed_data, total_size,
+ set_param_opcode);
+
+done:
+ kfree(packed_data);
+ return ret;
+}
static int q6lsm_send_custom_topologies(struct lsm_client *client)
{
@@ -586,14 +734,18 @@ void q6lsm_sm_set_param_data(struct lsm_client *client,
struct lsm_params_info *p_info,
size_t *offset)
{
- struct lsm_param_payload_common *param;
-
- param = (struct lsm_param_payload_common *)
- client->sound_model.data;
- param->module_id = p_info->module_id;
- param->param_id = p_info->param_id;
- param->p_size.param_size = client->sound_model.size;
- *offset = sizeof(*param);
+ struct param_hdr_v3 param_hdr = {0};
+ int ret = 0;
+
+ param_hdr.module_id = p_info->module_id;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = p_info->param_id;
+ param_hdr.param_size = client->sound_model.size;
+
+ ret = q6lsm_pack_params((u8 *) client->sound_model.data, &param_hdr,
+ NULL, offset, LSM_SESSION_CMD_SET_PARAMS_V2);
+ if (ret)
+ pr_err("%s: Failed to pack params, error %d\n", __func__, ret);
}
int q6lsm_open(struct lsm_client *client, uint16_t app_id)
@@ -644,109 +796,64 @@ done:
return rc;
}
-static int q6lsm_send_confidence_levels(
- struct lsm_client *client,
- struct lsm_module_param_ids *ids,
- u32 set_param_opcode)
+static int q6lsm_send_confidence_levels(struct lsm_client *client,
+ struct param_hdr_v3 *param_info,
+ uint32_t set_param_opcode)
{
- u8 *packet;
- size_t pkt_size;
- struct lsm_cmd_set_params_conf *conf_params;
- struct apr_hdr *msg_hdr;
- struct lsm_param_min_confidence_levels *cfl;
+ struct lsm_param_confidence_levels *conf_levels = NULL;
+ uint32_t num_conf_levels = client->num_confidence_levels;
uint8_t i = 0;
uint8_t padd_size = 0;
- u8 *conf_levels;
- int rc;
- u32 payload_size, param_size;
+ uint32_t param_size = 0;
+ int rc = 0;
- padd_size = (4 - (client->num_confidence_levels % 4)) - 1;
- pkt_size = sizeof(*conf_params) + padd_size +
- client->num_confidence_levels;
+ /* Data must be 4 byte aligned so add any necessary padding. */
+ padd_size = (4 - (num_conf_levels % 4)) - 1;
+ param_size = (sizeof(uint8_t) + num_conf_levels + padd_size) *
+ sizeof(uint8_t);
+ param_info->param_size = param_size;
+ pr_debug("%s: Set Conf Levels PARAM SIZE = %d\n", __func__, param_size);
- packet = kzalloc(pkt_size, GFP_KERNEL);
- if (!packet) {
- pr_err("%s: no memory for confidence level, size = %zd\n",
- __func__, pkt_size);
+ conf_levels = kzalloc(param_size, GFP_KERNEL);
+ if (!conf_levels)
return -ENOMEM;
- }
- conf_params = (struct lsm_cmd_set_params_conf *) packet;
- conf_levels = (u8 *) (packet + sizeof(*conf_params));
- msg_hdr = &conf_params->msg_hdr;
- q6lsm_add_hdr(client, msg_hdr,
- pkt_size, true);
- msg_hdr->opcode = set_param_opcode;
- payload_size = pkt_size - sizeof(*msg_hdr) -
- sizeof(conf_params->params_hdr);
- q6lsm_set_param_hdr_info(&conf_params->params_hdr,
- payload_size, 0, 0, 0);
- cfl = &conf_params->conf_payload;
- param_size = ((sizeof(uint8_t) + padd_size +
- client->num_confidence_levels)) *
- sizeof(uint8_t);
- q6lsm_set_param_common(&cfl->common, ids,
- param_size, set_param_opcode);
- cfl->num_confidence_levels = client->num_confidence_levels;
-
- pr_debug("%s: CMD PARAM SIZE = %d\n",
- __func__, param_size);
- pr_debug("%s: Num conf_level = %d\n",
- __func__, client->num_confidence_levels);
-
- memcpy(conf_levels, client->confidence_levels,
- client->num_confidence_levels);
- for (i = 0; i < client->num_confidence_levels; i++)
- pr_debug("%s: Confidence_level[%d] = %d\n",
- __func__, i, conf_levels[i]);
+ conf_levels->num_confidence_levels = num_conf_levels;
+ pr_debug("%s: Num conf_level = %d\n", __func__, num_conf_levels);
- rc = q6lsm_apr_send_pkt(client, client->apr,
- packet, true, NULL);
+ memcpy(conf_levels->confidence_levels, client->confidence_levels,
+ num_conf_levels);
+ for (i = 0; i < num_conf_levels; i++)
+ pr_debug("%s: Confidence_level[%d] = %d\n", __func__, i,
+ conf_levels->confidence_levels[i]);
+
+ rc = q6lsm_pack_and_set_params(client, param_info,
+ (uint8_t *) conf_levels,
+ set_param_opcode);
if (rc)
- pr_err("%s: confidence_levels cmd failed, err = %d\n",
- __func__, rc);
- kfree(packet);
+ pr_err("%s: Send confidence_levels cmd failed, err = %d\n",
+ __func__, rc);
+ kfree(conf_levels);
return rc;
}
static int q6lsm_send_param_opmode(struct lsm_client *client,
- struct lsm_module_param_ids *opmode_ids,
- u32 set_param_opcode)
+ struct param_hdr_v3 *param_info,
+ u32 set_param_opcode)
{
- int rc;
- struct lsm_cmd_set_params_opmode opmode_params;
- struct apr_hdr *msg_hdr;
-
- struct lsm_param_op_mode *op_mode;
- u32 data_payload_size, param_size;
-
- msg_hdr = &opmode_params.msg_hdr;
- q6lsm_add_hdr(client, msg_hdr,
- sizeof(opmode_params), true);
- msg_hdr->opcode = set_param_opcode;
- data_payload_size = sizeof(opmode_params) -
- sizeof(*msg_hdr) -
- sizeof(opmode_params.params_hdr);
- q6lsm_set_param_hdr_info(&opmode_params.params_hdr,
- data_payload_size, 0, 0, 0);
- op_mode = &opmode_params.op_mode;
-
-
- param_size = sizeof(struct lsm_param_op_mode) -
- sizeof(op_mode->common);
- q6lsm_set_param_common(&op_mode->common,
- opmode_ids, param_size,
- set_param_opcode);
- op_mode->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
- op_mode->mode = client->mode;
- op_mode->reserved = 0;
- pr_debug("%s: mode = 0x%x", __func__, op_mode->mode);
+ struct lsm_param_op_mode op_mode = {0};
+ int rc = 0;
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &opmode_params, true, NULL);
+ param_info->param_size = sizeof(op_mode);
+
+ op_mode.minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ op_mode.mode = client->mode;
+ pr_debug("%s: mode = 0x%x", __func__, op_mode.mode);
+
+ rc = q6lsm_pack_and_set_params(client, param_info, (uint8_t *) &op_mode,
+ set_param_opcode);
if (rc)
- pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
- __func__, msg_hdr->opcode, rc);
+ pr_err("%s: Failed set_params, rc %d\n", __func__, rc);
pr_debug("%s: leave %d\n", __func__, rc);
return rc;
@@ -764,138 +871,81 @@ int get_lsm_port(void)
int q6lsm_set_port_connected(struct lsm_client *client)
{
- int rc;
- struct lsm_cmd_set_connectport connectport;
- struct lsm_module_param_ids connectport_ids;
- struct apr_hdr *msg_hdr;
- struct lsm_param_connect_to_port *connect_to_port;
- u32 data_payload_size, param_size, set_param_opcode;
+ struct lsm_param_connect_to_port connect_port = {0};
+ struct param_hdr_v3 connectport_hdr = {0};
+ u32 set_param_opcode = 0;
+ int rc = 0;
if (client->use_topology) {
set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
- connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+ connectport_hdr.module_id = LSM_MODULE_ID_FRAMEWORK;
} else {
set_param_opcode = LSM_SESSION_CMD_SET_PARAMS;
- connectport_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
- connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+ connectport_hdr.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
}
- client->connect_to_port = get_lsm_port();
+ connectport_hdr.instance_id = INSTANCE_ID_0;
+ connectport_hdr.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+ connectport_hdr.param_size = sizeof(connect_port);
- msg_hdr = &connectport.msg_hdr;
- q6lsm_add_hdr(client, msg_hdr,
- sizeof(connectport), true);
- msg_hdr->opcode = set_param_opcode;
- data_payload_size = sizeof(connectport) -
- sizeof(*msg_hdr) -
- sizeof(connectport.params_hdr);
- q6lsm_set_param_hdr_info(&connectport.params_hdr,
- data_payload_size, 0, 0, 0);
- connect_to_port = &connectport.connect_to_port;
-
- param_size = (sizeof(struct lsm_param_connect_to_port) -
- sizeof(connect_to_port->common));
- q6lsm_set_param_common(&connect_to_port->common,
- &connectport_ids, param_size,
- set_param_opcode);
- connect_to_port->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
- connect_to_port->port_id = client->connect_to_port;
- connect_to_port->reserved = 0;
- pr_debug("%s: port= %d", __func__, connect_to_port->port_id);
+ client->connect_to_port = get_lsm_port();
+ connect_port.minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ connect_port.port_id = client->connect_to_port;
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &connectport, true, NULL);
+ rc = q6lsm_pack_and_set_params(client, &connectport_hdr,
+ (uint8_t *) &connect_port,
+ set_param_opcode);
if (rc)
- pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
- __func__, msg_hdr->opcode, rc);
-
+ pr_err("%s: Failed set_params, rc %d\n", __func__, rc);
return rc;
}
+
static int q6lsm_send_param_polling_enable(struct lsm_client *client,
- bool poll_en,
- struct lsm_module_param_ids *poll_enable_ids,
- u32 set_param_opcode)
+ bool poll_en,
+ struct param_hdr_v3 *param_info,
+ u32 set_param_opcode)
{
+ struct lsm_param_poll_enable polling_enable = {0};
int rc = 0;
- struct lsm_cmd_poll_enable cmd;
- struct apr_hdr *msg_hdr;
- struct lsm_param_poll_enable *poll_enable;
- u32 data_payload_size, param_size;
-
- msg_hdr = &cmd.msg_hdr;
- q6lsm_add_hdr(client, msg_hdr,
- sizeof(struct lsm_cmd_poll_enable), true);
- msg_hdr->opcode = set_param_opcode;
- data_payload_size = sizeof(struct lsm_cmd_poll_enable) -
- sizeof(struct apr_hdr) -
- sizeof(struct lsm_set_params_hdr);
- q6lsm_set_param_hdr_info(&cmd.params_hdr,
- data_payload_size, 0, 0, 0);
- poll_enable = &cmd.poll_enable;
-
- param_size = (sizeof(struct lsm_param_poll_enable) -
- sizeof(poll_enable->common));
- q6lsm_set_param_common(&poll_enable->common,
- poll_enable_ids, param_size,
- set_param_opcode);
- poll_enable->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
- poll_enable->polling_enable = (poll_en) ? 1 : 0;
- pr_debug("%s: poll enable= %d", __func__, poll_enable->polling_enable);
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &cmd, true, NULL);
- if (rc)
- pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
- __func__, msg_hdr->opcode, rc);
+ param_info->param_size = sizeof(polling_enable);
+
+ polling_enable.minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ polling_enable.polling_enable = (poll_en) ? 1 : 0;
+ rc = q6lsm_pack_and_set_params(client, param_info,
+ (uint8_t *) &polling_enable,
+ set_param_opcode);
+ if (rc)
+ pr_err("%s: Failed set_params, rc %d\n", __func__, rc);
return rc;
}
int q6lsm_set_fwk_mode_cfg(struct lsm_client *client,
uint32_t event_mode)
{
+ struct lsm_param_fwk_mode_cfg fwk_mode_cfg = {0};
+ struct param_hdr_v3 fwk_mode_cfg_hdr = {0};
int rc = 0;
- struct lsm_cmd_set_fwk_mode_cfg cmd;
- struct lsm_module_param_ids fwk_mode_cfg_ids;
- struct apr_hdr *msg_hdr;
- struct lsm_param_fwk_mode_cfg *fwk_mode_cfg;
- u32 data_payload_size, param_size, set_param_opcode;
- if (client->use_topology) {
- set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- fwk_mode_cfg_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
- fwk_mode_cfg_ids.param_id = LSM_PARAM_ID_FWK_MODE_CONFIG;
- } else {
+ if (!client->use_topology) {
pr_debug("%s: Ignore sending event mode\n", __func__);
return rc;
}
- msg_hdr = &cmd.msg_hdr;
- q6lsm_add_hdr(client, msg_hdr,
- sizeof(struct lsm_cmd_set_fwk_mode_cfg), true);
- msg_hdr->opcode = set_param_opcode;
- data_payload_size = sizeof(struct lsm_cmd_set_fwk_mode_cfg) -
- sizeof(struct apr_hdr) -
- sizeof(struct lsm_set_params_hdr);
- q6lsm_set_param_hdr_info(&cmd.params_hdr,
- data_payload_size, 0, 0, 0);
- fwk_mode_cfg = &cmd.fwk_mode_cfg;
-
- param_size = (sizeof(struct lsm_param_fwk_mode_cfg) -
- sizeof(fwk_mode_cfg->common));
- q6lsm_set_param_common(&fwk_mode_cfg->common,
- &fwk_mode_cfg_ids, param_size,
- set_param_opcode);
+ fwk_mode_cfg_hdr.module_id = LSM_MODULE_ID_FRAMEWORK;
+ fwk_mode_cfg_hdr.instance_id = INSTANCE_ID_0;
+ fwk_mode_cfg_hdr.param_id = LSM_PARAM_ID_FWK_MODE_CONFIG;
+ fwk_mode_cfg_hdr.param_size = sizeof(fwk_mode_cfg);
- fwk_mode_cfg->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
- fwk_mode_cfg->mode = event_mode;
- pr_debug("%s: mode = %d\n", __func__, fwk_mode_cfg->mode);
+ fwk_mode_cfg.minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ fwk_mode_cfg.mode = event_mode;
+ pr_debug("%s: mode = %d\n", __func__, fwk_mode_cfg.mode);
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &cmd, true, NULL);
+ rc = q6lsm_pack_and_set_params(client, &fwk_mode_cfg_hdr,
+ (uint8_t *) &fwk_mode_cfg,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
- pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
- __func__, msg_hdr->opcode, rc);
+ pr_err("%s: Failed set_params, rc %d\n", __func__, rc);
return rc;
}
@@ -935,58 +985,38 @@ static int q6lsm_arrange_mch_map(struct lsm_param_media_fmt *media_fmt,
int q6lsm_set_media_fmt_params(struct lsm_client *client)
{
- int rc = 0;
- struct lsm_cmd_set_media_fmt cmd;
- struct lsm_module_param_ids media_fmt_ids;
- struct apr_hdr *msg_hdr;
- struct lsm_param_media_fmt *media_fmt;
- u32 data_payload_size, param_size, set_param_opcode;
+ struct lsm_param_media_fmt media_fmt = {0};
struct lsm_hw_params param = client->hw_params;
+ struct param_hdr_v3 media_fmt_hdr = {0};
+ int rc = 0;
- if (client->use_topology) {
- set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- media_fmt_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
- media_fmt_ids.param_id = LSM_PARAM_ID_MEDIA_FMT;
- } else {
+ if (!client->use_topology) {
pr_debug("%s: Ignore sending media format\n", __func__);
goto err_ret;
}
- msg_hdr = &cmd.msg_hdr;
- q6lsm_add_hdr(client, msg_hdr,
- sizeof(struct lsm_cmd_set_media_fmt), true);
- msg_hdr->opcode = set_param_opcode;
- data_payload_size = sizeof(struct lsm_cmd_set_media_fmt) -
- sizeof(struct apr_hdr) -
- sizeof(struct lsm_set_params_hdr);
- q6lsm_set_param_hdr_info(&cmd.params_hdr,
- data_payload_size, 0, 0, 0);
- media_fmt = &cmd.media_fmt;
-
- param_size = (sizeof(struct lsm_param_media_fmt) -
- sizeof(media_fmt->common));
- q6lsm_set_param_common(&media_fmt->common,
- &media_fmt_ids, param_size,
- set_param_opcode);
+ media_fmt_hdr.module_id = LSM_MODULE_ID_FRAMEWORK;
+ media_fmt_hdr.instance_id = INSTANCE_ID_0;
+ media_fmt_hdr.param_id = LSM_PARAM_ID_MEDIA_FMT;
+ media_fmt_hdr.param_size = sizeof(media_fmt);
- media_fmt->minor_version = QLSM_PARAM_ID_MINOR_VERSION_2;
- media_fmt->sample_rate = param.sample_rate;
- media_fmt->num_channels = param.num_chs;
- media_fmt->bit_width = param.sample_size;
-
- rc = q6lsm_arrange_mch_map(media_fmt, media_fmt->num_channels);
+ media_fmt.minor_version = QLSM_PARAM_ID_MINOR_VERSION_2;
+ media_fmt.sample_rate = param.sample_rate;
+ media_fmt.num_channels = param.num_chs;
+ media_fmt.bit_width = param.sample_size;
+ rc = q6lsm_arrange_mch_map(&media_fmt, media_fmt.num_channels);
if (rc)
goto err_ret;
- pr_debug("%s: sample rate= %d, channels %d bit width %d\n",
- __func__, media_fmt->sample_rate, media_fmt->num_channels,
- media_fmt->bit_width);
+ pr_debug("%s: sample rate= %d, channels %d bit width %d\n", __func__,
+ media_fmt.sample_rate, media_fmt.num_channels,
+ media_fmt.bit_width);
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &cmd, true, NULL);
+ rc = q6lsm_pack_and_set_params(client, &media_fmt_hdr,
+ (uint8_t *) &media_fmt,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
- pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
- __func__, msg_hdr->opcode, rc);
+ pr_err("%s: Failed set_params, rc %d\n", __func__, rc);
err_ret:
return rc;
}
@@ -995,9 +1025,8 @@ int q6lsm_set_data(struct lsm_client *client,
enum lsm_detection_mode mode,
bool detectfailure)
{
+ struct param_hdr_v3 param_hdr = {0};
int rc = 0;
- struct lsm_module_param_ids opmode_ids;
- struct lsm_module_param_ids conf_levels_ids;
if (!client->confidence_levels) {
/*
@@ -1021,22 +1050,20 @@ int q6lsm_set_data(struct lsm_client *client,
}
client->mode |= detectfailure << 2;
- opmode_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
- opmode_ids.param_id = LSM_PARAM_ID_OPERATION_MODE;
-
- rc = q6lsm_send_param_opmode(client, &opmode_ids,
- LSM_SESSION_CMD_SET_PARAMS);
+ param_hdr.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_id = LSM_PARAM_ID_OPERATION_MODE;
+ rc = q6lsm_send_param_opmode(client, &param_hdr,
+ LSM_SESSION_CMD_SET_PARAMS);
if (rc) {
pr_err("%s: Failed to set lsm config params %d\n",
__func__, rc);
goto err_ret;
}
- conf_levels_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
- conf_levels_ids.param_id = LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS;
-
- rc = q6lsm_send_confidence_levels(client, &conf_levels_ids,
- LSM_SESSION_CMD_SET_PARAMS);
+ param_hdr.param_id = LSM_PARAM_ID_MIN_CONFIDENCE_LEVELS;
+ rc = q6lsm_send_confidence_levels(client, &param_hdr,
+ LSM_SESSION_CMD_SET_PARAMS);
if (rc) {
pr_err("%s: Failed to send conf_levels, err = %d\n",
__func__, rc);
@@ -1226,9 +1253,7 @@ static int q6lsm_send_cal(struct lsm_client *client,
u32 set_params_opcode)
{
int rc = 0;
- struct lsm_cmd_set_params params;
- struct lsm_set_params_hdr *params_hdr = &params.param_hdr;
- struct apr_hdr *msg_hdr = &params.msg_hdr;
+ struct mem_mapping_hdr mem_hdr = {0};
struct cal_block_data *cal_block = NULL;
pr_debug("%s: Session id %d\n", __func__, client->session);
@@ -1258,21 +1283,16 @@ static int q6lsm_send_cal(struct lsm_client *client,
}
/* Cache mmap address, only map once or if new addr */
lsm_common.common_client[client->session].session = client->session;
- q6lsm_add_hdr(client, msg_hdr, sizeof(params), true);
- msg_hdr->opcode = set_params_opcode;
- q6lsm_set_param_hdr_info(params_hdr,
- cal_block->cal_data.size,
- lower_32_bits(client->lsm_cal_phy_addr),
- msm_audio_populate_upper_32_bits(
- client->lsm_cal_phy_addr),
- client->sound_model.mem_map_handle);
-
- pr_debug("%s: Cal Size = %zd", __func__,
- cal_block->cal_data.size);
- rc = q6lsm_apr_send_pkt(client, client->apr, &params, true, NULL);
+ mem_hdr.data_payload_addr_lsw = lower_32_bits(client->lsm_cal_phy_addr);
+ mem_hdr.data_payload_addr_msw =
+ msm_audio_populate_upper_32_bits(client->lsm_cal_phy_addr);
+ mem_hdr.mem_map_handle = client->sound_model.mem_map_handle;
+
+ pr_debug("%s: Cal Size = %zd", __func__, cal_block->cal_data.size);
+ rc = q6lsm_set_params(client, &mem_hdr, NULL, cal_block->cal_data.size,
+ set_params_opcode);
if (rc)
- pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
- __func__, msg_hdr->opcode, rc);
+ pr_err("%s: Failed set_params, rc %d\n", __func__, rc);
unlock:
mutex_unlock(&lsm_common.cal_data[LSM_CAL_IDX]->lock);
done:
@@ -1444,7 +1464,7 @@ int q6lsm_snd_model_buf_alloc(struct lsm_client *client, size_t len,
* set_param payload as well.
*/
if (allocate_module_data)
- len += sizeof(struct lsm_param_payload_common);
+ len += sizeof(union param_hdrs);
client->sound_model.size = len;
pad_zero = (LSM_ALIGN_BOUNDARY -
@@ -1539,66 +1559,44 @@ static int q6lsm_cmd(struct lsm_client *client, int opcode, bool wait)
return rc;
}
-static int q6lsm_send_param_epd_thres(
- struct lsm_client *client,
- void *data, struct lsm_module_param_ids *ids)
+static int q6lsm_send_param_epd_thres(struct lsm_client *client, void *data,
+ struct param_hdr_v3 *param_info)
{
- struct snd_lsm_ep_det_thres *ep_det_data;
- struct lsm_cmd_set_epd_threshold epd_cmd;
- struct apr_hdr *msg_hdr = &epd_cmd.msg_hdr;
- struct lsm_set_params_hdr *param_hdr =
- &epd_cmd.param_hdr;
- struct lsm_param_epd_thres *epd_thres =
- &epd_cmd.epd_thres;
- int rc;
+ struct snd_lsm_ep_det_thres *ep_det_data = NULL;
+ struct lsm_param_epd_thres epd_thres = {0};
+ int rc = 0;
+
+ param_info->param_size = sizeof(epd_thres);
ep_det_data = (struct snd_lsm_ep_det_thres *) data;
- q6lsm_add_hdr(client, msg_hdr,
- sizeof(epd_cmd), true);
- msg_hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- q6lsm_set_param_hdr_info(param_hdr,
- sizeof(*epd_thres), 0, 0, 0);
- q6lsm_set_param_common(&epd_thres->common, ids,
- sizeof(*epd_thres) - sizeof(epd_thres->common),
- LSM_SESSION_CMD_SET_PARAMS_V2);
- epd_thres->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
- epd_thres->epd_begin = ep_det_data->epd_begin;
- epd_thres->epd_end = ep_det_data->epd_end;
+ epd_thres.minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ epd_thres.epd_begin = ep_det_data->epd_begin;
+ epd_thres.epd_end = ep_det_data->epd_end;
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &epd_cmd, true, NULL);
+ rc = q6lsm_pack_and_set_params(client, param_info,
+ (uint8_t *) &epd_thres,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (unlikely(rc))
- pr_err("%s: EPD_THRESHOLD failed, rc %d\n",
- __func__, rc);
+ pr_err("%s: EPD_THRESHOLD failed, rc %d\n", __func__, rc);
return rc;
}
-static int q6lsm_send_param_gain(
- struct lsm_client *client,
- u16 gain, struct lsm_module_param_ids *ids)
+static int q6lsm_send_param_gain(struct lsm_client *client, u16 gain,
+ struct param_hdr_v3 *param_info)
{
- struct lsm_cmd_set_gain lsm_cmd_gain;
- struct apr_hdr *msg_hdr = &lsm_cmd_gain.msg_hdr;
- struct lsm_param_gain *lsm_gain = &lsm_cmd_gain.lsm_gain;
- int rc;
+ struct lsm_param_gain lsm_gain = {0};
+ int rc = 0;
- q6lsm_add_hdr(client, msg_hdr,
- sizeof(lsm_cmd_gain), true);
- msg_hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- q6lsm_set_param_hdr_info(&lsm_cmd_gain.param_hdr,
- sizeof(*lsm_gain), 0, 0, 0);
- q6lsm_set_param_common(&lsm_gain->common, ids,
- sizeof(*lsm_gain) - sizeof(lsm_gain->common),
- LSM_SESSION_CMD_SET_PARAMS_V2);
- lsm_gain->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
- lsm_gain->gain = gain;
- lsm_gain->reserved = 0;
+ param_info->param_size = sizeof(lsm_gain);
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &lsm_cmd_gain, true, NULL);
+ lsm_gain.minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ lsm_gain.gain = gain;
+
+ rc = q6lsm_pack_and_set_params(client, param_info,
+ (uint8_t *) &lsm_gain,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (unlikely(rc))
- pr_err("%s: LSM_GAIN CMD send failed, rc %d\n",
- __func__, rc);
+ pr_err("%s: LSM_GAIN CMD send failed, rc %d\n", __func__, rc);
return rc;
}
@@ -1606,23 +1604,23 @@ int q6lsm_set_one_param(struct lsm_client *client,
struct lsm_params_info *p_info, void *data,
uint32_t param_type)
{
- int rc = 0, pkt_sz;
- struct lsm_module_param_ids ids;
- u8 *packet;
+ struct param_hdr_v3 param_info = {0};
+ int rc = 0;
- memset(&ids, 0, sizeof(ids));
switch (param_type) {
case LSM_ENDPOINT_DETECT_THRESHOLD: {
- ids.module_id = p_info->module_id;
- ids.param_id = p_info->param_id;
- rc = q6lsm_send_param_epd_thres(client, data,
- &ids);
+ param_info.module_id = p_info->module_id;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = p_info->param_id;
+ rc = q6lsm_send_param_epd_thres(client, data, &param_info);
+ if (rc)
+ pr_err("%s: LSM_ENDPOINT_DETECT_THRESHOLD failed, rc %d\n",
+ __func__, rc);
break;
}
case LSM_OPERATION_MODE: {
struct snd_lsm_detect_mode *det_mode = data;
- struct lsm_module_param_ids opmode_ids;
if (det_mode->mode == LSM_MODE_KEYWORD_ONLY_DETECTION) {
client->mode = 0x01;
@@ -1636,11 +1634,12 @@ int q6lsm_set_one_param(struct lsm_client *client,
client->mode |= det_mode->detect_failure << 2;
- opmode_ids.module_id = p_info->module_id;
- opmode_ids.param_id = p_info->param_id;
+ param_info.module_id = p_info->module_id;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = p_info->param_id;
- rc = q6lsm_send_param_opmode(client, &opmode_ids,
- LSM_SESSION_CMD_SET_PARAMS_V2);
+ rc = q6lsm_send_param_opmode(client, &param_info,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
pr_err("%s: OPERATION_MODE failed, rc %d\n",
__func__, rc);
@@ -1649,9 +1648,10 @@ int q6lsm_set_one_param(struct lsm_client *client,
case LSM_GAIN: {
struct snd_lsm_gain *lsm_gain = (struct snd_lsm_gain *) data;
- ids.module_id = p_info->module_id;
- ids.param_id = p_info->param_id;
- rc = q6lsm_send_param_gain(client, lsm_gain->gain, &ids);
+ param_info.module_id = p_info->module_id;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = p_info->param_id;
+ rc = q6lsm_send_param_gain(client, lsm_gain->gain, &param_info);
if (rc)
pr_err("%s: LSM_GAIN command failed, rc %d\n",
__func__, rc);
@@ -1659,10 +1659,11 @@ int q6lsm_set_one_param(struct lsm_client *client,
}
case LSM_MIN_CONFIDENCE_LEVELS:
- ids.module_id = p_info->module_id;
- ids.param_id = p_info->param_id;
- rc = q6lsm_send_confidence_levels(client, &ids,
- LSM_SESSION_CMD_SET_PARAMS_V2);
+ param_info.module_id = p_info->module_id;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = p_info->param_id;
+ rc = q6lsm_send_confidence_levels(
+ client, &param_info, LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
pr_err("%s: CONFIDENCE_LEVELS cmd failed, rc %d\n",
__func__, rc);
@@ -1670,11 +1671,12 @@ int q6lsm_set_one_param(struct lsm_client *client,
case LSM_POLLING_ENABLE: {
struct snd_lsm_poll_enable *lsm_poll_enable =
(struct snd_lsm_poll_enable *) data;
- ids.module_id = p_info->module_id;
- ids.param_id = p_info->param_id;
- rc = q6lsm_send_param_polling_enable(client,
- lsm_poll_enable->poll_en, &ids,
- LSM_SESSION_CMD_SET_PARAMS_V2);
+ param_info.module_id = p_info->module_id;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = p_info->param_id;
+ rc = q6lsm_send_param_polling_enable(
+ client, lsm_poll_enable->poll_en, &param_info,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
pr_err("%s: POLLING ENABLE cmd failed, rc %d\n",
__func__, rc);
@@ -1682,24 +1684,25 @@ int q6lsm_set_one_param(struct lsm_client *client,
}
case LSM_REG_SND_MODEL: {
- struct lsm_cmd_set_params model_param;
+ struct mem_mapping_hdr mem_hdr = {0};
u32 payload_size;
- memset(&model_param, 0, sizeof(model_param));
- q6lsm_add_hdr(client, &model_param.msg_hdr,
- sizeof(model_param), true);
- model_param.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- payload_size = p_info->param_size +
- sizeof(struct lsm_param_payload_common);
- q6lsm_set_param_hdr_info(&model_param.param_hdr,
- payload_size,
- lower_32_bits(client->sound_model.phys),
- msm_audio_populate_upper_32_bits(
- client->sound_model.phys),
- client->sound_model.mem_map_handle);
-
- rc = q6lsm_apr_send_pkt(client, client->apr,
- &model_param, true, NULL);
+ if (q6common_is_instance_id_supported())
+ payload_size = p_info->param_size +
+ sizeof(struct param_hdr_v3);
+ else
+ payload_size = p_info->param_size +
+ sizeof(struct param_hdr_v2);
+
+ mem_hdr.data_payload_addr_lsw =
+ lower_32_bits(client->sound_model.phys);
+ mem_hdr.data_payload_addr_msw =
+ msm_audio_populate_upper_32_bits(
+ client->sound_model.phys);
+ mem_hdr.mem_map_handle = client->sound_model.mem_map_handle;
+
+ rc = q6lsm_set_params(client, &mem_hdr, NULL, payload_size,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc) {
pr_err("%s: REG_SND_MODEL failed, rc %d\n",
__func__, rc);
@@ -1714,69 +1717,33 @@ int q6lsm_set_one_param(struct lsm_client *client,
}
case LSM_DEREG_SND_MODEL: {
- struct lsm_param_payload_common *common;
- struct lsm_cmd_set_params *param;
-
- pkt_sz = sizeof(*param) + sizeof(*common);
- packet = kzalloc(pkt_sz, GFP_KERNEL);
- if (!packet) {
- pr_err("%s: No memory for DEREG_SND_MODEL pkt, size = %d\n",
- __func__, pkt_sz);
- return -ENOMEM;
- }
-
- param = (struct lsm_cmd_set_params *) packet;
- common = (struct lsm_param_payload_common *)
- (packet + sizeof(*param));
- q6lsm_add_hdr(client, &param->msg_hdr, pkt_sz, true);
- param->msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- q6lsm_set_param_hdr_info(&param->param_hdr,
- sizeof(*common),
- 0, 0, 0);
- ids.module_id = p_info->module_id;
- ids.param_id = p_info->param_id;
- q6lsm_set_param_common(common, &ids, 0,
- LSM_SESSION_CMD_SET_PARAMS_V2);
- rc = q6lsm_apr_send_pkt(client, client->apr,
- packet, true, NULL);
+ param_info.module_id = p_info->module_id;
+ param_info.instance_id = INSTANCE_ID_0;
+ param_info.param_id = p_info->param_id;
+ param_info.param_size = 0;
+ rc = q6lsm_pack_and_set_params(client, &param_info, NULL,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
pr_err("%s: DEREG_SND_MODEL failed, rc %d\n",
__func__, rc);
- kfree(packet);
break;
}
case LSM_CUSTOM_PARAMS: {
- struct apr_hdr *hdr;
- u8 *custom_data;
+ u32 param_size = p_info->param_size;
- if (p_info->param_size <
- sizeof(struct lsm_param_payload_common)) {
- pr_err("%s: Invalid param_size %d\n",
- __func__, p_info->param_size);
+ /* Check minimum size, V2 structure is smaller than V3 */
+ if (param_size < sizeof(struct param_hdr_v2)) {
+ pr_err("%s: Invalid param_size %d\n", __func__,
+ param_size);
return -EINVAL;
}
- pkt_sz = p_info->param_size + sizeof(*hdr);
- packet = kzalloc(pkt_sz, GFP_KERNEL);
- if (!packet) {
- pr_err("%s: no memory for CUSTOM_PARAMS, size = %d\n",
- __func__, pkt_sz);
- return -ENOMEM;
- }
-
- hdr = (struct apr_hdr *) packet;
- custom_data = (u8 *) (packet + sizeof(*hdr));
- q6lsm_add_hdr(client, hdr, pkt_sz, true);
- hdr->opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
- memcpy(custom_data, data, p_info->param_size);
-
- rc = q6lsm_apr_send_pkt(client, client->apr,
- packet, true, NULL);
+ rc = q6lsm_set_params(client, NULL, data, param_size,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
pr_err("%s: CUSTOM_PARAMS failed, rc %d\n",
__func__, rc);
- kfree(packet);
break;
}
default:
@@ -1805,60 +1772,51 @@ int q6lsm_close(struct lsm_client *client)
int q6lsm_lab_control(struct lsm_client *client, u32 enable)
{
+ struct lsm_param_lab_enable lab_enable = {0};
+ struct param_hdr_v3 lab_enable_hdr = {0};
+ struct lsm_param_lab_config lab_config = {0};
+ struct param_hdr_v3 lab_config_hdr = {0};
int rc = 0;
- struct lsm_params_lab_enable lab_enable;
- struct lsm_params_lab_config lab_config;
- struct lsm_module_param_ids lab_ids;
- u32 param_size;
if (!client) {
pr_err("%s: invalid param client %pK\n", __func__, client);
return -EINVAL;
}
+
/* enable/disable lab on dsp */
- q6lsm_add_hdr(client, &lab_enable.msg_hdr, sizeof(lab_enable), true);
- lab_enable.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS;
- q6lsm_set_param_hdr_info(&lab_enable.params_hdr,
- sizeof(struct lsm_lab_enable),
- 0, 0, 0);
- param_size = (sizeof(struct lsm_lab_enable) -
- sizeof(struct lsm_param_payload_common));
- lab_ids.module_id = LSM_MODULE_ID_LAB;
- lab_ids.param_id = LSM_PARAM_ID_LAB_ENABLE;
- q6lsm_set_param_common(&lab_enable.lab_enable.common,
- &lab_ids, param_size,
- LSM_SESSION_CMD_SET_PARAMS);
- lab_enable.lab_enable.enable = (enable) ? 1 : 0;
- rc = q6lsm_apr_send_pkt(client, client->apr, &lab_enable, true, NULL);
+ lab_enable_hdr.module_id = LSM_MODULE_ID_LAB;
+ lab_enable_hdr.instance_id = INSTANCE_ID_0;
+ lab_enable_hdr.param_id = LSM_PARAM_ID_LAB_ENABLE;
+ lab_enable_hdr.param_size = sizeof(lab_enable);
+ lab_enable.enable = (enable) ? 1 : 0;
+ rc = q6lsm_pack_and_set_params(client, &lab_enable_hdr,
+ (uint8_t *) &lab_enable,
+ LSM_SESSION_CMD_SET_PARAMS);
if (rc) {
pr_err("%s: Lab enable failed rc %d\n", __func__, rc);
return rc;
}
if (!enable)
goto exit;
+
/* lab session is being enabled set the config values */
- q6lsm_add_hdr(client, &lab_config.msg_hdr, sizeof(lab_config), true);
- lab_config.msg_hdr.opcode = LSM_SESSION_CMD_SET_PARAMS;
- q6lsm_set_param_hdr_info(&lab_config.params_hdr,
- sizeof(struct lsm_lab_config),
- 0, 0, 0);
- lab_ids.module_id = LSM_MODULE_ID_LAB;
- lab_ids.param_id = LSM_PARAM_ID_LAB_CONFIG;
- param_size = (sizeof(struct lsm_lab_config) -
- sizeof(struct lsm_param_payload_common));
- q6lsm_set_param_common(&lab_config.lab_config.common,
- &lab_ids, param_size,
- LSM_SESSION_CMD_SET_PARAMS);
- lab_config.lab_config.minor_version = 1;
- lab_config.lab_config.wake_up_latency_ms = 250;
- rc = q6lsm_apr_send_pkt(client, client->apr, &lab_config, true, NULL);
+ lab_config_hdr.module_id = LSM_MODULE_ID_LAB;
+ lab_config_hdr.instance_id = INSTANCE_ID_0;
+ lab_config_hdr.param_id = LSM_PARAM_ID_LAB_CONFIG;
+ lab_config_hdr.param_size = sizeof(lab_config);
+ lab_config.minor_version = 1;
+ lab_config.wake_up_latency_ms = 250;
+ rc = q6lsm_pack_and_set_params(client, &lab_config_hdr,
+ (uint8_t *) &lab_config,
+ LSM_SESSION_CMD_SET_PARAMS);
if (rc) {
pr_err("%s: Lab config failed rc %d disable lab\n",
__func__, rc);
/* Lab config failed disable lab */
- lab_enable.lab_enable.enable = 0;
- if (q6lsm_apr_send_pkt(client, client->apr,
- &lab_enable, true, NULL))
+ lab_enable.enable = 0;
+ if (q6lsm_pack_and_set_params(client, &lab_enable_hdr,
+ (uint8_t *) &lab_enable,
+ LSM_SESSION_CMD_SET_PARAMS))
pr_err("%s: Lab disable failed\n", __func__);
}
exit:
@@ -2142,6 +2100,8 @@ static int __init q6lsm_init(void)
{
int i = 0;
pr_debug("%s:\n", __func__);
+
+ memset(&lsm_common, 0, sizeof(struct lsm_common));
spin_lock_init(&lsm_session_lock);
spin_lock_init(&mmap_lock);
mutex_init(&lsm_common.apr_lock);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 01e31578f107..a0f30a32f8e6 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -24,6 +24,7 @@
#include "sound/q6audio-v2.h"
#include "sound/apr_audio-v2.h"
#include "sound/q6afe-v2.h"
+#include <sound/q6common.h>
#include <sound/audio_cal_utils.h>
#include "q6voice.h"
#include <sound/adsp_err.h>
@@ -93,8 +94,9 @@ static int32_t qdsp_mvm_callback(struct apr_client_data *data, void *priv);
static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv);
static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv);
-static int voice_send_set_pp_enable_cmd(struct voice_data *v,
- uint32_t module_id, int enable);
+static int voice_send_set_pp_enable_cmd(
+ struct voice_data *v, struct module_instance_info mod_inst_info,
+ int enable);
static int is_cal_memory_allocated(void);
static bool is_cvd_version_queried(void);
static int is_voip_memory_allocated(void);
@@ -126,6 +128,12 @@ static int voice_send_get_sound_focus_cmd(struct voice_data *v,
struct sound_focus_param *soundFocusData);
static int voice_send_get_source_tracking_cmd(struct voice_data *v,
struct source_tracking_param *sourceTrackingData);
+static int voice_pack_and_set_cvp_param(struct voice_data *v,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data);
+static int voice_pack_and_set_cvs_ui_property(struct voice_data *v,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data);
static void voice_itr_init(struct voice_session_itr *itr,
u32 session_id)
@@ -1451,70 +1459,29 @@ fail:
return ret;
}
-static int voice_send_set_pp_enable_cmd(struct voice_data *v,
- uint32_t module_id, int enable)
+static int voice_send_set_pp_enable_cmd(
+ struct voice_data *v, struct module_instance_info mod_inst_info,
+ int enable)
{
- struct cvs_set_pp_enable_cmd cvs_set_pp_cmd;
+ struct enable_param enable_param = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- void *apr_cvs;
- u16 cvs_handle;
- if (v == NULL) {
- pr_err("%s: v is NULL\n", __func__);
- return -EINVAL;
- }
- apr_cvs = common.apr_q6_cvs;
+ param_hdr.module_id = mod_inst_info.module_id;
+ param_hdr.instance_id = mod_inst_info.instance_id;
+ param_hdr.param_id = VOICE_PARAM_MOD_ENABLE;
+ param_hdr.param_size = sizeof(enable_param);
+ enable_param.enable = enable ? 1 : 0;
- if (!apr_cvs) {
- pr_err("%s: apr_cvs is NULL.\n", __func__);
- return -EINVAL;
- }
- cvs_handle = voice_get_cvs_handle(v);
+ pr_debug("%s: voice_send_set_pp_enable_cmd, module_id=%d, instance_id=%d, enable=%d\n",
+ __func__, mod_inst_info.module_id, mod_inst_info.instance_id,
+ enable);
- cvs_set_pp_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
- APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- cvs_set_pp_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
- sizeof(cvs_set_pp_cmd) -
- APR_HDR_SIZE);
- cvs_set_pp_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
- cvs_set_pp_cmd.hdr.dest_port = cvs_handle;
- cvs_set_pp_cmd.hdr.token = 0;
- cvs_set_pp_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_UI_PROPERTY;
-
- cvs_set_pp_cmd.vss_set_pp.module_id = module_id;
- cvs_set_pp_cmd.vss_set_pp.param_id = VOICE_PARAM_MOD_ENABLE;
- cvs_set_pp_cmd.vss_set_pp.param_size = MOD_ENABLE_PARAM_LEN;
- cvs_set_pp_cmd.vss_set_pp.reserved = 0;
- cvs_set_pp_cmd.vss_set_pp.enable = enable;
- cvs_set_pp_cmd.vss_set_pp.reserved_field = 0;
- pr_debug("voice_send_set_pp_enable_cmd, module_id=%d, enable=%d\n",
- module_id, enable);
+ ret = voice_pack_and_set_cvs_ui_property(v, param_hdr,
+ (uint8_t *) &enable_param);
+ if (ret < 0)
+ pr_err("Fail: sending cvs set pp enable\n");
- v->cvs_state = CMD_STATUS_FAIL;
- v->async_err = 0;
- ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_pp_cmd);
- if (ret < 0) {
- pr_err("Fail: sending cvs set pp enable,\n");
- goto fail;
- }
- ret = wait_event_timeout(v->cvs_wait,
- (v->cvs_state == CMD_STATUS_SUCCESS),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- goto fail;
- }
- if (v->async_err > 0) {
- pr_err("%s: DSP returned error[%s]\n",
- __func__, adsp_err_get_err_str(
- v->async_err));
- ret = adsp_err_get_lnx_err_code(
- v->async_err);
- goto fail;
- }
- return 0;
-fail:
return ret;
}
@@ -3823,6 +3790,7 @@ done:
static int voice_setup_vocproc(struct voice_data *v)
{
+ struct module_instance_info mod_inst_info = {0};
int ret = 0;
ret = voice_send_cvp_create_cmd(v);
@@ -3845,6 +3813,9 @@ static int voice_setup_vocproc(struct voice_data *v)
goto fail;
}
+ mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
+
voice_send_cvs_register_cal_cmd(v);
voice_send_cvp_register_dev_cfg_cmd(v);
voice_send_cvp_register_cal_cmd(v);
@@ -3878,9 +3849,7 @@ static int voice_setup_vocproc(struct voice_data *v)
}
if (v->st_enable && !v->tty_mode)
- voice_send_set_pp_enable_cmd(v,
- MODULE_ID_VOICE_MODULE_ST,
- v->st_enable);
+ voice_send_set_pp_enable_cmd(v, mod_inst_info, v->st_enable);
/* Start in-call music delivery if this feature is enabled */
if (v->music_info.play_enable)
voice_cvs_start_playback(v);
@@ -4017,14 +3986,9 @@ done:
static int voice_send_cvp_media_format_cmd(struct voice_data *v,
uint32_t param_type)
{
+ struct vss_param_endpoint_media_format_info media_fmt_info = {0};
+ struct param_hdr_v3 param_hdr = {0};
int ret = 0;
- struct cvp_set_media_format_cmd cvp_set_media_format_cmd;
- void *apr_cvp;
- u16 cvp_handle;
- struct vss_icommon_param_data_t *media_fmt_param_data =
- &cvp_set_media_format_cmd.cvp_set_param_v2.param_data;
- struct vss_param_endpoint_media_format_info_t *media_fmt_info =
- &media_fmt_param_data->media_format_info;
if (v == NULL) {
pr_err("%s: v is NULL\n", __func__);
@@ -4032,75 +3996,41 @@ static int voice_send_cvp_media_format_cmd(struct voice_data *v,
goto done;
}
- apr_cvp = common.apr_q6_cvp;
- if (!apr_cvp) {
- pr_err("%s: apr_cvp is NULL.\n", __func__);
- ret = -EINVAL;
- goto done;
- }
-
- cvp_handle = voice_get_cvp_handle(v);
- memset(&cvp_set_media_format_cmd, 0, sizeof(cvp_set_media_format_cmd));
+ param_hdr.module_id = VSS_MODULE_CVD_GENERIC;
+ param_hdr.instance_id = INSTANCE_ID_0;
+ param_hdr.param_size = sizeof(media_fmt_info);
- /* Fill header data */
- cvp_set_media_format_cmd.hdr.hdr_field =
- APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
- APR_PKT_VER);
- cvp_set_media_format_cmd.hdr.pkt_size =
- APR_PKT_SIZE(APR_HDR_SIZE,
- sizeof(cvp_set_media_format_cmd) - APR_HDR_SIZE);
- cvp_set_media_format_cmd.hdr.src_svc = 0;
- cvp_set_media_format_cmd.hdr.src_domain = APR_DOMAIN_APPS;
- cvp_set_media_format_cmd.hdr.src_port =
- voice_get_idx_for_session(v->session_id);
- cvp_set_media_format_cmd.hdr.dest_svc = 0;
- cvp_set_media_format_cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
- cvp_set_media_format_cmd.hdr.dest_port = cvp_handle;
- cvp_set_media_format_cmd.hdr.token = VOC_SET_MEDIA_FORMAT_PARAM_TOKEN;
- cvp_set_media_format_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2;
-
- /* Fill param data */
- cvp_set_media_format_cmd.cvp_set_param_v2.mem_size =
- sizeof(struct vss_icommon_param_data_t);
- media_fmt_param_data->module_id = VSS_MODULE_CVD_GENERIC;
- media_fmt_param_data->param_size =
- sizeof(struct vss_param_endpoint_media_format_info_t);
-
- /* Fill device specific data */
switch (param_type) {
case RX_PATH:
- media_fmt_param_data->param_id =
- VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO;
- media_fmt_info->port_id = v->dev_rx.port_id;
- media_fmt_info->num_channels = v->dev_rx.no_of_channels;
- media_fmt_info->bits_per_sample = v->dev_rx.bits_per_sample;
- media_fmt_info->sample_rate = v->dev_rx.sample_rate;
- memcpy(&media_fmt_info->channel_mapping,
+ param_hdr.param_id = VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO;
+ media_fmt_info.port_id = v->dev_rx.port_id;
+ media_fmt_info.num_channels = v->dev_rx.no_of_channels;
+ media_fmt_info.bits_per_sample = v->dev_rx.bits_per_sample;
+ media_fmt_info.sample_rate = v->dev_rx.sample_rate;
+ memcpy(&media_fmt_info.channel_mapping,
&v->dev_rx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
break;
case TX_PATH:
- media_fmt_param_data->param_id =
- VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO;
- media_fmt_info->port_id = v->dev_tx.port_id;
- media_fmt_info->num_channels = v->dev_tx.no_of_channels;
- media_fmt_info->bits_per_sample = v->dev_tx.bits_per_sample;
- media_fmt_info->sample_rate = v->dev_tx.sample_rate;
- memcpy(&media_fmt_info->channel_mapping,
+ param_hdr.param_id = VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO;
+ media_fmt_info.port_id = v->dev_tx.port_id;
+ media_fmt_info.num_channels = v->dev_tx.no_of_channels;
+ media_fmt_info.bits_per_sample = v->dev_tx.bits_per_sample;
+ media_fmt_info.sample_rate = v->dev_tx.sample_rate;
+ memcpy(&media_fmt_info.channel_mapping,
&v->dev_tx.channel_mapping, VSS_CHANNEL_MAPPING_SIZE);
break;
case EC_REF_PATH:
- media_fmt_param_data->param_id =
- VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO;
- media_fmt_info->port_id = common.ec_media_fmt_info.port_id;
- media_fmt_info->num_channels =
+ param_hdr.param_id = VSS_PARAM_EC_REF_PORT_ENDPOINT_MEDIA_INFO;
+ media_fmt_info.port_id = common.ec_media_fmt_info.port_id;
+ media_fmt_info.num_channels =
common.ec_media_fmt_info.num_channels;
- media_fmt_info->bits_per_sample =
+ media_fmt_info.bits_per_sample =
common.ec_media_fmt_info.bits_per_sample;
- media_fmt_info->sample_rate =
+ media_fmt_info.sample_rate =
common.ec_media_fmt_info.sample_rate;
- memcpy(&media_fmt_info->channel_mapping,
+ memcpy(&media_fmt_info.channel_mapping,
&common.ec_media_fmt_info.channel_mapping,
VSS_CHANNEL_MAPPING_SIZE);
break;
@@ -4111,32 +4041,11 @@ static int voice_send_cvp_media_format_cmd(struct voice_data *v,
goto done;
}
- /* Send command */
- v->cvp_state = CMD_STATUS_FAIL;
- v->async_err = 0;
- ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_set_media_format_cmd);
- if (ret < 0) {
- pr_err("%s: Fail in sending VSS_ICOMMON_CMD_SET_PARAM_V2\n",
- __func__);
- ret = -EINVAL;
- goto done;
- }
-
- ret = wait_event_timeout(v->cvp_wait,
- (v->cvp_state == CMD_STATUS_SUCCESS),
- msecs_to_jiffies(TIMEOUT_MS));
- if (!ret) {
- pr_err("%s: wait_event timeout\n", __func__);
- ret = -EINVAL;
- goto done;
- }
-
- if (v->async_err > 0) {
- pr_err("%s: DSP returned error[%s] handle = %d\n", __func__,
- adsp_err_get_err_str(v->async_err), cvp_handle);
- ret = adsp_err_get_lnx_err_code(v->async_err);
- goto done;
- }
+ ret = voice_pack_and_set_cvp_param(v, param_hdr,
+ (u8 *) &media_fmt_info);
+ if (ret)
+ pr_err("%s: Failed to set media format params on CVP, err %d\n",
+ __func__, ret);
done:
return ret;
@@ -4532,6 +4441,7 @@ static int voice_destroy_vocproc(struct voice_data *v)
{
struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd;
struct apr_hdr cvp_destroy_session_cmd;
+ struct module_instance_info mod_inst_info = {0};
int ret = 0;
void *apr_mvm, *apr_cvp;
u16 mvm_handle, cvp_handle;
@@ -4550,9 +4460,12 @@ static int voice_destroy_vocproc(struct voice_data *v)
mvm_handle = voice_get_mvm_handle(v);
cvp_handle = voice_get_cvp_handle(v);
+ mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
+
/* disable slowtalk if st_enable is set */
if (v->st_enable)
- voice_send_set_pp_enable_cmd(v, MODULE_ID_VOICE_MODULE_ST, 0);
+ voice_send_set_pp_enable_cmd(v, mod_inst_info, 0);
/* Disable HD Voice if hd_enable is set */
if (v->hd_enable)
@@ -5789,11 +5702,15 @@ uint8_t voc_get_tty_mode(uint32_t session_id)
return ret;
}
-int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, uint32_t enable)
+int voc_set_pp_enable(uint32_t session_id,
+ struct module_instance_info mod_inst_info,
+ uint32_t enable)
{
struct voice_data *v = NULL;
int ret = 0;
struct voice_session_itr itr;
+ int mid = mod_inst_info.module_id;
+ int iid = mod_inst_info.instance_id;
voice_itr_init(&itr, session_id);
while (voice_itr_get_next_session(&itr, &v)) {
@@ -5802,15 +5719,15 @@ int voc_set_pp_enable(uint32_t session_id, uint32_t module_id, uint32_t enable)
continue;
mutex_lock(&v->lock);
- if (module_id == MODULE_ID_VOICE_MODULE_ST)
+ if (mid == MODULE_ID_VOICE_MODULE_ST &&
+ iid == INSTANCE_ID_0)
v->st_enable = enable;
if (v->voc_state == VOC_RUN) {
- if ((module_id == MODULE_ID_VOICE_MODULE_ST) &&
- (!v->tty_mode))
- ret = voice_send_set_pp_enable_cmd(v,
- MODULE_ID_VOICE_MODULE_ST,
- enable);
+ if ((mid == MODULE_ID_VOICE_MODULE_ST) &&
+ iid == INSTANCE_ID_0 && (!v->tty_mode))
+ ret = voice_send_set_pp_enable_cmd(
+ v, mod_inst_info, enable);
}
mutex_unlock(&v->lock);
} else {
@@ -5893,7 +5810,8 @@ bool voc_get_afe_sidetone(void)
return ret;
}
-int voc_get_pp_enable(uint32_t session_id, uint32_t module_id)
+int voc_get_pp_enable(uint32_t session_id,
+ struct module_instance_info mod_inst_info)
{
struct voice_data *v = voice_get_session(session_id);
int ret = 0;
@@ -5905,7 +5823,8 @@ int voc_get_pp_enable(uint32_t session_id, uint32_t module_id)
}
mutex_lock(&v->lock);
- if (module_id == MODULE_ID_VOICE_MODULE_ST)
+ if (mod_inst_info.module_id == MODULE_ID_VOICE_MODULE_ST &&
+ mod_inst_info.instance_id == INSTANCE_ID_0)
ret = v->st_enable;
mutex_unlock(&v->lock);
@@ -6180,6 +6099,7 @@ done:
int voc_enable_device(uint32_t session_id)
{
struct voice_data *v = voice_get_session(session_id);
+ struct module_instance_info mod_inst_info = {0};
int ret = 0;
if (v == NULL) {
@@ -6197,15 +6117,15 @@ int voc_enable_device(uint32_t session_id)
/* Not a critical error, allow voice call to continue */
}
+ mod_inst_info.module_id = MODULE_ID_VOICE_MODULE_ST;
+ mod_inst_info.instance_id = INSTANCE_ID_0;
+
if (v->tty_mode) {
/* disable slowtalk */
- voice_send_set_pp_enable_cmd(v,
- MODULE_ID_VOICE_MODULE_ST,
- 0);
+ voice_send_set_pp_enable_cmd(v, mod_inst_info, 0);
} else {
/* restore slowtalk */
- voice_send_set_pp_enable_cmd(v,
- MODULE_ID_VOICE_MODULE_ST,
+ voice_send_set_pp_enable_cmd(v, mod_inst_info,
v->st_enable);
}
@@ -6787,6 +6707,7 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
case VSS_ICOMMON_CMD_MAP_MEMORY:
case VSS_ICOMMON_CMD_UNMAP_MEMORY:
case VSS_ICOMMON_CMD_SET_UI_PROPERTY:
+ case VSS_ICOMMON_CMD_SET_UI_PROPERTY_V2:
case VSS_IPLAYBACK_CMD_START:
case VSS_IPLAYBACK_CMD_STOP:
case VSS_IRECORD_CMD_START:
@@ -6800,12 +6721,14 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
wake_up(&v->cvs_wait);
break;
case VSS_ICOMMON_CMD_SET_PARAM_V2:
- pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+ case VSS_ICOMMON_CMD_SET_PARAM_V3:
+ pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM\n",
__func__);
rtac_make_voice_callback(RTAC_CVS, ptr,
data->payload_size);
break;
case VSS_ICOMMON_CMD_GET_PARAM_V2:
+ case VSS_ICOMMON_CMD_GET_PARAM_V3:
pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
__func__);
/* Should only come here if there is an APR */
@@ -6938,7 +6861,8 @@ static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv)
pr_debug("Recd VSS_ISTREAM_EVT_NOT_READY\n");
} else if (data->opcode == VSS_ISTREAM_EVT_READY) {
pr_debug("Recd VSS_ISTREAM_EVT_READY\n");
- } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+ } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM ||
+ data->opcode == VSS_ICOMMON_RSP_GET_PARAM_V3) {
pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
ptr = data->payload;
if (ptr[0] != 0) {
@@ -7081,28 +7005,30 @@ static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv)
case VSS_IVPCM_EVT_PUSH_BUFFER_V2:
break;
case VSS_ICOMMON_CMD_SET_PARAM_V2:
+ case VSS_ICOMMON_CMD_SET_PARAM_V3:
switch (data->token) {
case VOC_SET_MEDIA_FORMAT_PARAM_TOKEN:
- pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by voice_send_cvp_media_format_cmd\n",
+ pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM called by voice_send_cvp_media_format_cmd\n",
__func__);
v->cvp_state = CMD_STATUS_SUCCESS;
v->async_err = ptr[1];
wake_up(&v->cvp_wait);
break;
case VOC_RTAC_SET_PARAM_TOKEN:
- pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by rtac\n",
+ pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM called by rtac\n",
__func__);
rtac_make_voice_callback(
RTAC_CVP, ptr,
data->payload_size);
break;
default:
- pr_debug("%s: invalid token for command VSS_ICOMMON_CMD_SET_PARAM_V2: %d\n",
+ pr_debug("%s: invalid token for command VSS_ICOMMON_CMD_SET_PARAM: %d\n",
__func__, data->token);
break;
}
break;
case VSS_ICOMMON_CMD_GET_PARAM_V2:
+ case VSS_ICOMMON_CMD_GET_PARAM_V3:
pr_debug("%s: VSS_ICOMMON_CMD_GET_PARAM_V2\n",
__func__);
/* Should only come here if there is an APR */
@@ -7169,7 +7095,8 @@ static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv)
break;
}
}
- } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM) {
+ } else if (data->opcode == VSS_ICOMMON_RSP_GET_PARAM ||
+ data->opcode == VSS_ICOMMON_RSP_GET_PARAM_V3) {
pr_debug("%s: VSS_ICOMMON_RSP_GET_PARAM\n", __func__);
ptr = data->payload;
if (ptr[0] != 0) {
@@ -8578,6 +8505,199 @@ int voc_get_source_tracking(struct source_tracking_param *sourceTrackingData)
return ret;
}
+static int voice_set_cvp_param(struct voice_data *v,
+ struct vss_icommon_mem_mapping_hdr *mem_hdr,
+ u32 *param_data, u32 param_size)
+{
+ struct vss_icommon_cmd_set_param *set_param = NULL;
+ uint32_t pkt_size = sizeof(struct vss_icommon_cmd_set_param);
+ void *apr_cvp;
+ int ret = 0;
+
+ apr_cvp = common.apr_q6_cvp;
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (param_data != NULL)
+ pkt_size += param_size;
+ set_param = kzalloc(pkt_size, GFP_KERNEL);
+ if (!set_param)
+ return -ENOMEM;
+
+ set_param->apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ set_param->apr_hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE, pkt_size - APR_HDR_SIZE);
+ set_param->apr_hdr.src_svc = 0;
+ set_param->apr_hdr.src_domain = APR_DOMAIN_APPS;
+ set_param->apr_hdr.src_port = voice_get_idx_for_session(v->session_id);
+ set_param->apr_hdr.dest_svc = 0;
+ set_param->apr_hdr.dest_domain = APR_DOMAIN_ADSP;
+ set_param->apr_hdr.dest_port = voice_get_cvp_handle(v);
+ set_param->apr_hdr.token = VOC_SET_MEDIA_FORMAT_PARAM_TOKEN;
+ set_param->apr_hdr.opcode = q6common_is_instance_id_supported() ?
+ VSS_ICOMMON_CMD_SET_PARAM_V3 :
+ VSS_ICOMMON_CMD_SET_PARAM_V2;
+
+ set_param->payload_size = param_size;
+
+ if (mem_hdr != NULL) {
+ set_param->mem_hdr = *mem_hdr;
+ } else if (param_data != NULL) {
+ memcpy(set_param->param_data, param_data, param_size);
+ } else {
+ pr_err("%s: Both memory header and param data are NULL\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ v->async_err = 0;
+ ret = apr_send_pkt(apr_cvp, (u32 *) set_param);
+ if (ret < 0) {
+ pr_err("%s: Failed to send apr packet, error %d\n", __func__,
+ ret);
+ goto done;
+ }
+
+ ret = wait_event_timeout(v->cvp_wait,
+ v->cvp_state == CMD_STATUS_SUCCESS,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (v->async_err > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(v->async_err));
+ ret = adsp_err_get_lnx_err_code(v->async_err);
+ goto done;
+ }
+ ret = 0;
+
+done:
+ kfree(set_param);
+ return ret;
+}
+
+static int voice_pack_and_set_cvp_param(struct voice_data *v,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data)
+{
+ u8 *packed_data = NULL;
+ u32 total_size = 0;
+ int ret = 0;
+
+ total_size = sizeof(union param_hdrs) + param_hdr.param_size;
+ packed_data = kzalloc(total_size, GFP_KERNEL);
+ if (!packed_data)
+ return -ENOMEM;
+
+ ret = q6common_pack_pp_params(packed_data, &param_hdr, param_data,
+ &total_size);
+ if (ret) {
+ pr_err("%s: Failed to pack params, error %d", __func__, ret);
+ goto done;
+ }
+
+ ret = voice_set_cvp_param(v, NULL, (u32 *) packed_data, total_size);
+
+done:
+ kfree(packed_data);
+ return ret;
+}
+
+/*
+ * Out of band is not supported and there are currently no pre-packed cases,
+ * so pack and set in the same function. When needed, split up.
+ */
+static int voice_pack_and_set_cvs_ui_property(struct voice_data *v,
+ struct param_hdr_v3 param_hdr,
+ u8 *param_data)
+{
+ struct vss_icommon_cmd_set_ui_property *set_ui_property = NULL;
+ u32 total_size = 0;
+ bool iid_supported = q6common_is_instance_id_supported();
+ void *apr_cvs;
+ int ret = 0;
+
+ apr_cvs = common.apr_q6_cvs;
+ if (!apr_cvs) {
+ pr_err("%s: apr_cvs is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ total_size = sizeof(struct vss_icommon_cmd_set_ui_property) +
+ sizeof(union param_hdrs) + param_hdr.param_size;
+ set_ui_property = kzalloc(total_size, GFP_KERNEL);
+ if (!set_ui_property)
+ return -ENOMEM;
+
+ ret = q6common_pack_pp_params(set_ui_property->param_data, &param_hdr,
+ param_data, &total_size);
+ if (ret) {
+ pr_err("%s: Failed to pack params, error %d", __func__, ret);
+ goto done;
+ }
+
+ /*
+ * Pack the APR header after packing the data so we have the actual
+ * total size of the payload
+ */
+ set_ui_property->apr_hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ set_ui_property->apr_hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE, total_size);
+ set_ui_property->apr_hdr.src_svc = 0;
+ set_ui_property->apr_hdr.src_domain = APR_DOMAIN_APPS;
+ set_ui_property->apr_hdr.src_port =
+ voice_get_idx_for_session(v->session_id);
+ set_ui_property->apr_hdr.dest_svc = 0;
+ set_ui_property->apr_hdr.dest_domain = APR_DOMAIN_ADSP;
+ set_ui_property->apr_hdr.dest_port = voice_get_cvs_handle(v);
+ set_ui_property->apr_hdr.token = 0;
+
+ set_ui_property->apr_hdr.opcode =
+ iid_supported ? VSS_ICOMMON_CMD_SET_UI_PROPERTY_V2 :
+ VSS_ICOMMON_CMD_SET_UI_PROPERTY;
+
+ v->cvs_state = CMD_STATUS_FAIL;
+ v->async_err = 0;
+ ret = apr_send_pkt(apr_cvs, (u32 *) set_ui_property);
+ if (ret < 0) {
+ pr_err("%s: Failed to send apr packet, error %d\n", __func__,
+ ret);
+ goto done;
+ }
+
+ ret = wait_event_timeout(v->cvs_wait,
+ v->cvs_state == CMD_STATUS_SUCCESS,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (v->async_err > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(v->async_err));
+ ret = adsp_err_get_lnx_err_code(v->async_err);
+ goto done;
+ }
+ ret = 0;
+done:
+ kfree(set_ui_property);
+ return ret;
+}
+
int is_voc_initialized(void)
{
return module_initialized;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index f7ea650dfda9..f448e701d564 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -172,6 +172,7 @@ struct mem_map_table {
/* Common */
#define VSS_ICOMMON_CMD_SET_UI_PROPERTY 0x00011103
+#define VSS_ICOMMON_CMD_SET_UI_PROPERTY_V2 0x00013248
/* Set a UI property */
#define VSS_ICOMMON_CMD_MAP_MEMORY 0x00011025
#define VSS_ICOMMON_CMD_UNMAP_MEMORY 0x00011026
@@ -213,7 +214,7 @@ struct vss_unmap_memory_cmd {
struct vss_icommon_cmd_unmap_memory_t vss_unmap_mem;
} __packed;
-struct vss_param_endpoint_media_format_info_t {
+struct vss_param_endpoint_media_format_info {
/* AFE port ID to which this media format corresponds to. */
uint32_t port_id;
/*
@@ -240,29 +241,7 @@ struct vss_param_endpoint_media_format_info_t {
uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
} __packed;
-struct vss_icommon_param_data_t {
- /* Valid ID of the module. */
- uint32_t module_id;
- /* Valid ID of the parameter. */
- uint32_t param_id;
- /*
- * Data size of the structure relating to the param_id/module_id
- * combination in uint8_t bytes.
- */
- uint16_t param_size;
- /* This field must be set to zero. */
- uint16_t reserved;
- /*
- * Parameter data payload when inband. Should have size param_size.
- * Bit size of payload must be a multiple of 4.
- */
- union {
- struct vss_param_endpoint_media_format_info_t media_format_info;
- };
-} __packed;
-
-/* Payload structure for the VSS_ICOMMON_CMD_SET_PARAM_V2 command. */
-struct vss_icommon_cmd_set_param_v2_t {
+struct vss_icommon_mem_mapping_hdr {
/*
* Pointer to the unique identifier for an address (physical/virtual).
*
@@ -275,6 +254,7 @@ struct vss_icommon_cmd_set_param_v2_t {
* data.
*/
uint32_t mem_handle;
+
/*
* Location of the parameter data payload.
*
@@ -282,12 +262,25 @@ struct vss_icommon_cmd_set_param_v2_t {
* mem_handle is 0, this field is ignored.
*/
uint64_t mem_address;
- /* Size of the parameter data payload in bytes. */
- uint32_t mem_size;
- /* Parameter data payload when the data is inband. */
- struct vss_icommon_param_data_t param_data;
+
} __packed;
+struct vss_icommon_cmd_set_param {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
+
+ /* The memory mapping header to be used when sending outband */
+ struct vss_icommon_mem_mapping_hdr mem_hdr;
+
+ /* Size of the parameter data payload in bytes. */
+ uint32_t payload_size;
+
+ /*
+ * Parameter data payload when inband. Should have size param_size.
+ * Bit size of payload must be a multiple of 4.
+ */
+ uint8_t param_data[0];
+} __packed;
/* TO MVM commands */
#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x000110FF
/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
@@ -638,7 +631,6 @@ struct vss_imemory_cmd_unmap_t {
#define MODULE_ID_VOICE_MODULE_ST 0x00010EE3
#define VOICE_PARAM_MOD_ENABLE 0x00010E00
-#define MOD_ENABLE_PARAM_LEN 4
#define VSS_IPLAYBACK_CMD_START 0x000112BD
/* Start in-call music delivery on the Tx voice path. */
@@ -907,20 +899,20 @@ struct vss_istream_cmd_register_calibration_data_v2_t {
*/
} __packed;
-struct vss_icommon_cmd_set_ui_property_enable_t {
- uint32_t module_id;
- /* Unique ID of the module. */
- uint32_t param_id;
- /* Unique ID of the parameter. */
- uint16_t param_size;
- /* Size of the parameter in bytes: MOD_ENABLE_PARAM_LEN */
- uint16_t reserved;
- /* Reserved; set to 0. */
+struct enable_param {
uint16_t enable;
uint16_t reserved_field;
/* Reserved, set to 0. */
};
+struct vss_icommon_cmd_set_ui_property {
+ /* APR Header */
+ struct apr_hdr apr_hdr;
+
+ /* The parameter data to be filled when sent inband */
+ u8 param_data[0];
+} __packed;
+
/*
* Event sent by the stream to the client that enables Rx DTMF
* detection whenever DTMF is detected in the Rx path.
@@ -1029,10 +1021,6 @@ struct cvs_deregister_cal_data_cmd {
struct apr_hdr hdr;
} __packed;
-struct cvs_set_pp_enable_cmd {
- struct apr_hdr hdr;
- struct vss_icommon_cmd_set_ui_property_enable_t vss_set_pp;
-} __packed;
struct cvs_start_record_cmd {
struct apr_hdr hdr;
struct vss_irecord_cmd_start_t rec_mode;
@@ -1105,6 +1093,8 @@ struct vss_istream_cmd_set_packet_exchange_mode_t {
*/
#define VSS_IVOCPROC_CMD_DEREGISTER_DEVICE_CONFIG 0x00011372
+#define CVD_CAL_DATA_FORMAT_MINOR_VERSION_V0 0x00000000
+#define CVD_CAL_DATA_FORMAT_MINOR_VERSION_V1 0x00000001
#define VSS_IVOCPROC_CMD_REGISTER_CALIBRATION_DATA_V2 0x00011373
#define VSS_IVOCPROC_CMD_DEREGISTER_CALIBRATION_DATA 0x00011276
@@ -1484,11 +1474,6 @@ struct cvp_set_dev_channels_cmd {
struct vss_ivocproc_cmd_topology_set_dev_channels_t cvp_set_channels;
} __packed;
-struct cvp_set_media_format_cmd {
- struct apr_hdr hdr;
- struct vss_icommon_cmd_set_param_v2_t cvp_set_param_v2;
-} __packed;
-
struct cvp_set_vp3_data_cmd {
struct apr_hdr hdr;
} __packed;
@@ -1836,9 +1821,11 @@ enum {
#define VSID_MAX ALL_SESSION_VSID
/* called by alsa driver */
-int voc_set_pp_enable(uint32_t session_id, uint32_t module_id,
+int voc_set_pp_enable(uint32_t session_id,
+ struct module_instance_info mod_inst_info,
uint32_t enable);
-int voc_get_pp_enable(uint32_t session_id, uint32_t module_id);
+int voc_get_pp_enable(uint32_t session_id,
+ struct module_instance_info mod_inst_info);
int voc_set_hd_enable(uint32_t session_id, uint32_t enable);
uint8_t voc_get_tty_mode(uint32_t session_id);
int voc_set_tty_mode(uint32_t session_id, uint8_t tty_mode);
diff --git a/sound/soc/msm/qdsp6v2/rtac.c b/sound/soc/msm/qdsp6v2/rtac.c
index 77c6dfbbe8c1..5e33fb508455 100644
--- a/sound/soc/msm/qdsp6v2/rtac.c
+++ b/sound/soc/msm/qdsp6v2/rtac.c
@@ -27,6 +27,7 @@
#include <sound/q6afe-v2.h>
#include <sound/q6adm-v2.h>
#include <sound/apr_audio-v2.h>
+#include <sound/q6common.h>
#include "q6voice.h"
#include "msm-pcm-routing-v2.h"
#include <sound/adsp_err.h>
@@ -104,14 +105,10 @@ struct rtac_afe_user_data {
uint32_t cmd_size;
uint32_t port_id;
union {
- struct rtac_afe_set {
- struct afe_port_cmd_set_param_v2 cmd;
- struct afe_port_param_data_v2 data;
- } rtac_afe_set;
- struct rtac_afe_get {
- struct afe_port_cmd_get_param_v2 cmd;
- struct afe_port_param_data_v2 data;
- } rtac_afe_get;
+ struct afe_rtac_user_data_set_v2 v2_set;
+ struct afe_rtac_user_data_set_v3 v3_set;
+ struct afe_rtac_user_data_get_v2 v2_get;
+ struct afe_rtac_user_data_get_v3 v3_get;
};
} __packed;
@@ -800,7 +797,9 @@ int send_adm_apr(void *buf, u32 opcode)
goto err;
}
- if (opcode == ADM_CMD_SET_PP_PARAMS_V5) {
+ switch (opcode) {
+ case ADM_CMD_SET_PP_PARAMS_V5:
+ case ADM_CMD_SET_PP_PARAMS_V6:
/* set payload size to in-band payload */
/* set data size to actual out of band payload size */
data_size = payload_size - 4 * sizeof(u32);
@@ -818,12 +817,15 @@ int send_adm_apr(void *buf, u32 opcode)
buf + 7 * sizeof(u32), data_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
+
/* set payload size in packet */
rtac_adm_buffer[8] = data_size;
- } else {
+ break;
+ case ADM_CMD_GET_PP_PARAMS_V5:
+ case ADM_CMD_GET_PP_PARAMS_V6:
if (payload_size > MAX_PAYLOAD_SIZE) {
pr_err("%s: Invalid payload size = %d\n",
__func__, payload_size);
@@ -837,9 +839,14 @@ int send_adm_apr(void *buf, u32 opcode)
buf + 3 * sizeof(u32), payload_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
+ break;
+ default:
+ pr_err("%s: Invalid opcode %d\n", __func__, opcode);
+ result = -EINVAL;
+ goto err;
}
/* Pack header */
@@ -900,33 +907,39 @@ int send_adm_apr(void *buf, u32 opcode)
if (opcode == ADM_CMD_GET_PP_PARAMS_V5) {
bytes_returned = ((u32 *)rtac_cal[ADM_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ } else if (opcode == ADM_CMD_GET_PP_PARAMS_V6) {
+ bytes_returned =
+ ((u32 *) rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr)[3] +
+ 4 * sizeof(u32);
+ } else {
+ bytes_returned = data_size;
+ goto unlock;
+ }
- if (bytes_returned > rtac_cal[ADM_RTAC_CAL].
- map_data.map_size) {
- pr_err("%s: Invalid data size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > rtac_cal[ADM_RTAC_CAL].map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n", __func__,
+ bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (bytes_returned > user_buf_size) {
- pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
- __func__, user_buf_size, bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > user_buf_size) {
+ pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
+ __func__, user_buf_size, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (copy_to_user(buf, (void *)
- rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
- bytes_returned)) {
- pr_err("%s: Could not copy buffer to user,size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
- } else {
- bytes_returned = data_size;
+ if (copy_to_user((void __user *) buf,
+ rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
+ bytes_returned)) {
+ pr_err("%s: Could not copy buffer to user,size = %d\n",
+ __func__, bytes_returned);
+ result = -EFAULT;
+ goto err;
}
+
+unlock:
mutex_unlock(&rtac_adm_apr_mutex);
done:
return bytes_returned;
@@ -1027,7 +1040,9 @@ int send_rtac_asm_apr(void *buf, u32 opcode)
goto err;
}
- if (opcode == ASM_STREAM_CMD_SET_PP_PARAMS_V2) {
+ switch (opcode) {
+ case ASM_STREAM_CMD_SET_PP_PARAMS_V2:
+ case ASM_STREAM_CMD_SET_PP_PARAMS_V3:
/* set payload size to in-band payload */
/* set data size to actual out of band payload size */
data_size = payload_size - 4 * sizeof(u32);
@@ -1045,13 +1060,14 @@ int send_rtac_asm_apr(void *buf, u32 opcode)
buf + 7 * sizeof(u32), data_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
/* set payload size in packet */
rtac_asm_buffer[8] = data_size;
-
- } else {
+ break;
+ case ASM_STREAM_CMD_GET_PP_PARAMS_V2:
+ case ASM_STREAM_CMD_GET_PP_PARAMS_V3:
if (payload_size > MAX_PAYLOAD_SIZE) {
pr_err("%s: Invalid payload size = %d\n",
__func__, payload_size);
@@ -1065,9 +1081,15 @@ int send_rtac_asm_apr(void *buf, u32 opcode)
buf + 3 * sizeof(u32), payload_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
+
+ break;
+ default:
+ pr_err("%s: Invalid opcode %d\n", __func__, opcode);
+ result = -EINVAL;
+ goto err;
}
/* Pack header */
@@ -1130,33 +1152,39 @@ int send_rtac_asm_apr(void *buf, u32 opcode)
if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS_V2) {
bytes_returned = ((u32 *)rtac_cal[ASM_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ } else if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS_V3) {
+ bytes_returned =
+ ((u32 *) rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr)[3] +
+ 4 * sizeof(u32);
+ } else {
+ bytes_returned = data_size;
+ goto unlock;
+ }
- if (bytes_returned > rtac_cal[ASM_RTAC_CAL].
- map_data.map_size) {
- pr_err("%s: Invalid data size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > rtac_cal[ASM_RTAC_CAL].map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n", __func__,
+ bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (bytes_returned > user_buf_size) {
- pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
- __func__, user_buf_size, bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > user_buf_size) {
+ pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
+ __func__, user_buf_size, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (copy_to_user(buf, (void *)
- rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
- bytes_returned)) {
- pr_err("%s: Could not copy buffer to user,size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
- } else {
- bytes_returned = data_size;
+ if (copy_to_user((void __user *) buf,
+ rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
+ bytes_returned)) {
+ pr_err("%s: Could not copy buffer to user,size = %d\n",
+ __func__, bytes_returned);
+ result = -EFAULT;
+ goto err;
}
+
+unlock:
mutex_unlock(&rtac_asm_apr_mutex);
done:
return bytes_returned;
@@ -1213,13 +1241,18 @@ static int fill_afe_apr_hdr(struct apr_hdr *apr_hdr, uint32_t port,
return 0;
}
-static int send_rtac_afe_apr(void *buf, uint32_t opcode)
+static int send_rtac_afe_apr(void __user *buf, uint32_t opcode)
{
int32_t result;
uint32_t bytes_returned = 0;
+ uint32_t payload_size = 0;
uint32_t port_index = 0;
+ uint32_t *afe_cmd = NULL;
uint32_t apr_msg_size = 0;
struct rtac_afe_user_data user_afe_buf;
+ struct mem_mapping_hdr *mem_hdr = NULL;
+ struct param_hdr_v1 *get_resp_v2;
+ struct param_hdr_v3 *get_resp_v3;
pr_debug("%s\n", __func__);
@@ -1267,93 +1300,126 @@ static int send_rtac_afe_apr(void *buf, uint32_t opcode)
result = -EINVAL;
goto err;
}
- if (opcode == AFE_PORT_CMD_SET_PARAM_V2) {
- struct afe_port_cmd_set_param_v2 *afe_set_apr_msg;
- /* set data size to actual out of band payload size */
- if (user_afe_buf.rtac_afe_set.cmd.payload_size >
- rtac_cal[AFE_RTAC_CAL].map_data.map_size) {
- pr_err("%s: Invalid data size = %d\n",
- __func__,
- user_afe_buf.rtac_afe_set.cmd.payload_size);
+ afe_cmd =
+ (u32 *) rtac_afe_buffer + sizeof(struct apr_hdr) / sizeof(u32);
+
+ switch (opcode) {
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ apr_msg_size = sizeof(struct afe_port_cmd_set_param_v2);
+ payload_size = user_afe_buf.v2_set.payload_size;
+ if (payload_size > rtac_cal[AFE_RTAC_CAL].map_data.map_size) {
+ pr_err("%s: Invalid payload size = %d\n", __func__,
+ payload_size);
result = -EINVAL;
goto err;
}
- /* Copy buffer to out-of-band payload */
- if (copy_from_user((void *)
- rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr,
- buf+offsetof(struct rtac_afe_user_data,
- rtac_afe_set.data),
- user_afe_buf.rtac_afe_set.cmd.payload_size)) {
+ /* Copy the command to the rtac buffer */
+ memcpy(afe_cmd, &user_afe_buf.v2_set,
+ sizeof(user_afe_buf.v2_set));
+
+ /* Copy the param data to the out-of-band location */
+ if (copy_from_user(rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr,
+ (void __user *) buf +
+ offsetof(struct rtac_afe_user_data,
+ v2_set.param_hdr),
+ payload_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
+ result = -EFAULT;
+ goto err;
+ }
+ break;
+ case AFE_PORT_CMD_SET_PARAM_V3:
+ apr_msg_size = sizeof(struct afe_port_cmd_set_param_v3);
+ payload_size = user_afe_buf.v3_set.payload_size;
+ if (payload_size > rtac_cal[AFE_RTAC_CAL].map_data.map_size) {
+ pr_err("%s: Invalid payload size = %d\n", __func__,
+ payload_size);
result = -EINVAL;
goto err;
}
- /* Copy AFE APR Message */
- afe_set_apr_msg = (struct afe_port_cmd_set_param_v2 *)
- ((u8 *)rtac_afe_buffer +
- sizeof(struct apr_hdr));
- if (copy_from_user((void *)
- afe_set_apr_msg,
- buf + offsetof(struct rtac_afe_user_data,
- rtac_afe_set.cmd) ,
- sizeof(struct afe_port_cmd_set_param_v2))) {
+ /* Copy the command to the rtac buffer */
+ memcpy(afe_cmd, &user_afe_buf.v3_set,
+ sizeof(user_afe_buf.v3_set));
+
+ /* Copy the param data to the out-of-band location */
+ if (copy_from_user(rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr,
+ (void __user *) buf +
+ offsetof(struct rtac_afe_user_data,
+ v3_get.param_hdr),
+ payload_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
+ break;
+ case AFE_PORT_CMD_GET_PARAM_V2:
+ apr_msg_size = sizeof(struct afe_port_cmd_get_param_v2);
- afe_set_apr_msg->payload_address_lsw =
- lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
- afe_set_apr_msg->payload_address_msw =
- msm_audio_populate_upper_32_bits(
- rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
- afe_set_apr_msg->mem_map_handle =
- rtac_cal[AFE_RTAC_CAL].map_data.map_handle;
-
- apr_msg_size = sizeof(struct apr_hdr) +
- sizeof(struct afe_port_cmd_set_param_v2);
+ if (user_afe_buf.cmd_size > MAX_PAYLOAD_SIZE) {
+ pr_err("%s: Invalid payload size = %d\n", __func__,
+ user_afe_buf.cmd_size);
+ result = -EINVAL;
+ goto err;
+ }
- } else {
- struct afe_port_cmd_get_param_v2 *afe_get_apr_msg;
+ /* Copy the command and param data in-band */
+ if (copy_from_user(afe_cmd,
+ (void __user *) buf +
+ offsetof(struct rtac_afe_user_data,
+ v2_get),
+ user_afe_buf.cmd_size)) {
+ pr_err("%s: Could not copy payload from user buffer\n",
+ __func__);
+ result = -EFAULT;
+ goto err;
+ }
+ break;
+ case AFE_PORT_CMD_GET_PARAM_V3:
+ apr_msg_size = sizeof(struct afe_port_cmd_get_param_v3);
if (user_afe_buf.cmd_size > MAX_PAYLOAD_SIZE) {
- pr_err("%s: Invalid payload size = %d\n",
- __func__, user_afe_buf.cmd_size);
+ pr_err("%s: Invalid payload size = %d\n", __func__,
+ user_afe_buf.cmd_size);
result = -EINVAL;
goto err;
}
- /* Copy buffer to in-band payload */
- afe_get_apr_msg = (struct afe_port_cmd_get_param_v2 *)
- ((u8 *) rtac_afe_buffer +
- sizeof(struct apr_hdr));
- if (copy_from_user((void *)afe_get_apr_msg,
- buf+offsetof(struct rtac_afe_user_data,
- rtac_afe_get.cmd),
- sizeof(struct afe_port_cmd_get_param_v2))) {
+ /* Copy the command and param data in-band */
+ if (copy_from_user(afe_cmd,
+ (void __user *) buf +
+ offsetof(struct rtac_afe_user_data,
+ v3_get),
+ user_afe_buf.cmd_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
-
- afe_get_apr_msg->payload_address_lsw =
- lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
- afe_get_apr_msg->payload_address_msw =
- msm_audio_populate_upper_32_bits(
- rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
- afe_get_apr_msg->mem_map_handle =
- rtac_cal[AFE_RTAC_CAL].map_data.map_handle;
- afe_get_apr_msg->payload_size -= sizeof(struct apr_hdr);
- apr_msg_size = sizeof(struct apr_hdr) +
- sizeof(struct afe_port_cmd_get_param_v2);
+ break;
+ default:
+ pr_err("%s: Invalid opcode %d\n", __func__, opcode);
+ result = -EINVAL;
+ goto err;
}
+ /*
+ * The memory header is in the same location in all commands. Therefore,
+ * it doesn't matter what command the buffer is cast into.
+ */
+ mem_hdr = &((struct afe_port_cmd_set_param_v3 *) rtac_afe_buffer)
+ ->mem_hdr;
+ mem_hdr->data_payload_addr_lsw =
+ lower_32_bits(rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
+ mem_hdr->data_payload_addr_msw = msm_audio_populate_upper_32_bits(
+ rtac_cal[AFE_RTAC_CAL].cal_data.paddr);
+ mem_hdr->mem_map_handle = rtac_cal[AFE_RTAC_CAL].map_data.map_handle;
+
+ /* Fill the APR header at the end so we have the correct message size */
fill_afe_apr_hdr((struct apr_hdr *) rtac_afe_buffer,
port_index, opcode, apr_msg_size);
@@ -1391,40 +1457,44 @@ static int send_rtac_afe_apr(void *buf, uint32_t opcode)
}
if (opcode == AFE_PORT_CMD_GET_PARAM_V2) {
- struct afe_port_param_data_v2 *get_resp;
- get_resp = (struct afe_port_param_data_v2 *)
- rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr;
-
- bytes_returned = get_resp->param_size +
- sizeof(struct afe_port_param_data_v2);
+ get_resp_v2 = (struct param_hdr_v1 *) rtac_cal[AFE_RTAC_CAL]
+ .cal_data.kvaddr;
+ bytes_returned =
+ get_resp_v2->param_size + sizeof(struct param_hdr_v1);
+ } else if (opcode == AFE_PORT_CMD_GET_PARAM_V3) {
+ get_resp_v3 = (struct param_hdr_v3 *) rtac_cal[AFE_RTAC_CAL]
+ .cal_data.kvaddr;
+ bytes_returned =
+ get_resp_v3->param_size + sizeof(struct param_hdr_v3);
+ } else {
+ bytes_returned = payload_size;
+ goto unlock;
+ }
- if (bytes_returned > rtac_cal[AFE_RTAC_CAL].
- map_data.map_size) {
- pr_err("%s: Invalid data size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > rtac_cal[AFE_RTAC_CAL].map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n", __func__,
+ bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (bytes_returned > user_afe_buf.buf_size) {
- pr_err("%s: user size = 0x%x, returned size = 0x%x\n",
- __func__, user_afe_buf.buf_size,
- bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > user_afe_buf.buf_size) {
+ pr_err("%s: user size = 0x%x, returned size = 0x%x\n", __func__,
+ user_afe_buf.buf_size, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (copy_to_user(buf, (void *)
- rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr,
- bytes_returned)) {
- pr_err("%s: Could not copy buffer to user,size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
- } else {
- bytes_returned = user_afe_buf.rtac_afe_set.cmd.payload_size;
+ if (copy_to_user((void __user *) buf,
+ rtac_cal[AFE_RTAC_CAL].cal_data.kvaddr,
+ bytes_returned)) {
+ pr_err("%s: Could not copy buffer to user,size = %d\n",
+ __func__, bytes_returned);
+ result = -EFAULT;
+ goto err;
}
+
+unlock:
mutex_unlock(&rtac_afe_apr_mutex);
done:
return bytes_returned;
@@ -1526,7 +1596,9 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
goto err;
}
- if (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) {
+ switch (opcode) {
+ case VSS_ICOMMON_CMD_SET_PARAM_V2:
+ case VSS_ICOMMON_CMD_SET_PARAM_V3:
/* set payload size to in-band payload */
/* set data size to actual out of band payload size */
data_size = payload_size - 4 * sizeof(u32);
@@ -1544,12 +1616,16 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
buf + 7 * sizeof(u32), data_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
/* set payload size in packet */
rtac_voice_buffer[8] = data_size;
- } else {
+ /* set token for set param case */
+ voice_params.token = VOC_RTAC_SET_PARAM_TOKEN;
+ break;
+ case VSS_ICOMMON_CMD_GET_PARAM_V2:
+ case VSS_ICOMMON_CMD_GET_PARAM_V3:
if (payload_size > MAX_PAYLOAD_SIZE) {
pr_err("%s: Invalid payload size = %d\n",
__func__, payload_size);
@@ -1563,9 +1639,16 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
buf + 3 * sizeof(u32), payload_size)) {
pr_err("%s: Could not copy payload from user buffer\n",
__func__);
- result = -EINVAL;
+ result = -EFAULT;
goto err;
}
+ /* set token for get param case */
+ voice_params.token = 0;
+ break;
+ default:
+ pr_err("%s: Invalid opcode %d\n", __func__, opcode);
+ result = -EINVAL;
+ goto err;
}
/* Pack header */
@@ -1579,18 +1662,14 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
voice_params.dest_svc = 0;
voice_params.dest_domain = APR_DOMAIN_MODEM;
voice_params.dest_port = (u16)dest_port;
- voice_params.token = (opcode == VSS_ICOMMON_CMD_SET_PARAM_V2) ?
- VOC_RTAC_SET_PARAM_TOKEN :
- 0;
voice_params.opcode = opcode;
/* fill for out-of-band */
rtac_voice_buffer[5] = rtac_cal[VOICE_RTAC_CAL].map_data.map_handle;
rtac_voice_buffer[6] =
lower_32_bits(rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);
- rtac_voice_buffer[7] =
- msm_audio_populate_upper_32_bits(
- rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);
+ rtac_voice_buffer[7] = msm_audio_populate_upper_32_bits(
+ rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);
memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);
@@ -1629,33 +1708,39 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
if (opcode == VSS_ICOMMON_CMD_GET_PARAM_V2) {
bytes_returned = ((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ } else if (opcode == VSS_ICOMMON_CMD_GET_PARAM_V3) {
+ bytes_returned =
+ ((u32 *) rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr)[3] +
+ 4 * sizeof(u32);
+ } else {
+ bytes_returned = data_size;
+ goto unlock;
+ }
- if (bytes_returned > rtac_cal[VOICE_RTAC_CAL].
- map_data.map_size) {
- pr_err("%s: Invalid data size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > rtac_cal[VOICE_RTAC_CAL].map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n", __func__,
+ bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (bytes_returned > user_buf_size) {
- pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
- __func__, user_buf_size, bytes_returned);
- result = -EINVAL;
- goto err;
- }
+ if (bytes_returned > user_buf_size) {
+ pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
+ __func__, user_buf_size, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
- if (copy_to_user(buf, (void *)
- rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
- bytes_returned)) {
- pr_err("%s: Could not copy buffer to user, size = %d\n",
- __func__, bytes_returned);
- result = -EINVAL;
- goto err;
- }
- } else {
- bytes_returned = data_size;
+ if (copy_to_user((void __user *) buf,
+ rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
+ bytes_returned)) {
+ pr_err("%s: Could not copy buffer to user, size = %d\n",
+ __func__, bytes_returned);
+ result = -EFAULT;
+ goto err;
}
+
+unlock:
mutex_unlock(&rtac_voice_apr_mutex);
done:
return bytes_returned;
@@ -1675,6 +1760,7 @@ void get_rtac_adm_data(struct rtac_adm *adm_data)
static long rtac_ioctl_shared(struct file *f,
unsigned int cmd, void *arg)
{
+ u32 opcode;
int result = 0;
if (!arg) {
pr_err("%s: No data sent to driver!\n", __func__);
@@ -1713,42 +1799,64 @@ static long rtac_ioctl_shared(struct file *f,
}
case AUDIO_GET_RTAC_ADM_CAL:
- result = send_adm_apr((void *)arg, ADM_CMD_GET_PP_PARAMS_V5);
+ opcode = q6common_is_instance_id_supported() ?
+ ADM_CMD_GET_PP_PARAMS_V6 :
+ ADM_CMD_GET_PP_PARAMS_V5;
+ result = send_adm_apr((void *) arg, opcode);
break;
case AUDIO_SET_RTAC_ADM_CAL:
- result = send_adm_apr((void *)arg, ADM_CMD_SET_PP_PARAMS_V5);
+ opcode = q6common_is_instance_id_supported() ?
+ ADM_CMD_SET_PP_PARAMS_V6 :
+ ADM_CMD_SET_PP_PARAMS_V5;
+ result = send_adm_apr((void *) arg, opcode);
break;
case AUDIO_GET_RTAC_ASM_CAL:
- result = send_rtac_asm_apr((void *)arg,
- ASM_STREAM_CMD_GET_PP_PARAMS_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ ASM_STREAM_CMD_GET_PP_PARAMS_V3 :
+ ASM_STREAM_CMD_GET_PP_PARAMS_V2;
+ result = send_rtac_asm_apr((void *) arg, opcode);
break;
case AUDIO_SET_RTAC_ASM_CAL:
- result = send_rtac_asm_apr((void *)arg,
- ASM_STREAM_CMD_SET_PP_PARAMS_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ ASM_STREAM_CMD_SET_PP_PARAMS_V3 :
+ ASM_STREAM_CMD_SET_PP_PARAMS_V2;
+ result = send_rtac_asm_apr((void *) arg, opcode);
break;
case AUDIO_GET_RTAC_CVS_CAL:
- result = send_voice_apr(RTAC_CVS, (void *) arg,
- VSS_ICOMMON_CMD_GET_PARAM_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ VSS_ICOMMON_CMD_GET_PARAM_V3 :
+ VSS_ICOMMON_CMD_GET_PARAM_V2;
+ result = send_voice_apr(RTAC_CVS, (void *) arg, opcode);
break;
case AUDIO_SET_RTAC_CVS_CAL:
- result = send_voice_apr(RTAC_CVS, (void *) arg,
- VSS_ICOMMON_CMD_SET_PARAM_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ VSS_ICOMMON_CMD_SET_PARAM_V3 :
+ VSS_ICOMMON_CMD_SET_PARAM_V2;
+ result = send_voice_apr(RTAC_CVS, (void *) arg, opcode);
break;
case AUDIO_GET_RTAC_CVP_CAL:
- result = send_voice_apr(RTAC_CVP, (void *) arg,
- VSS_ICOMMON_CMD_GET_PARAM_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ VSS_ICOMMON_CMD_GET_PARAM_V3 :
+ VSS_ICOMMON_CMD_GET_PARAM_V2;
+ result = send_voice_apr(RTAC_CVP, (void *) arg, opcode);
break;
case AUDIO_SET_RTAC_CVP_CAL:
- result = send_voice_apr(RTAC_CVP, (void *) arg,
- VSS_ICOMMON_CMD_SET_PARAM_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ VSS_ICOMMON_CMD_SET_PARAM_V3 :
+ VSS_ICOMMON_CMD_SET_PARAM_V2;
+ result = send_voice_apr(RTAC_CVP, (void *) arg, opcode);
break;
case AUDIO_GET_RTAC_AFE_CAL:
- result = send_rtac_afe_apr((void *)arg,
- AFE_PORT_CMD_GET_PARAM_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ AFE_PORT_CMD_GET_PARAM_V3 :
+ AFE_PORT_CMD_GET_PARAM_V2;
+ result = send_rtac_afe_apr((void __user *) arg, opcode);
break;
case AUDIO_SET_RTAC_AFE_CAL:
- result = send_rtac_afe_apr((void *)arg,
- AFE_PORT_CMD_SET_PARAM_V2);
+ opcode = q6common_is_instance_id_supported() ?
+ AFE_PORT_CMD_SET_PARAM_V3 :
+ AFE_PORT_CMD_SET_PARAM_V2;
+ result = send_rtac_afe_apr((void __user *) arg, opcode);
break;
default:
pr_err("%s: Invalid IOCTL, command = %d!\n",
diff --git a/sound/soc/msm/sdm660-common.h b/sound/soc/msm/sdm660-common.h
index bca8cd788a39..549c3879d1af 100644
--- a/sound/soc/msm/sdm660-common.h
+++ b/sound/soc/msm/sdm660-common.h
@@ -17,6 +17,9 @@
#include <sound/q6afe-v2.h>
#include "../codecs/wcd-mbhc-v2.h"
+#define DEFAULT_MCLK_RATE 9600000
+#define NATIVE_MCLK_RATE 11289600
+
#define SAMPLING_RATE_8KHZ 8000
#define SAMPLING_RATE_11P025KHZ 11025
#define SAMPLING_RATE_16KHZ 16000
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 948fb287023d..3d86abd6964f 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -24,9 +24,6 @@
#define __CHIPSET__ "SDM660 "
#define MSM_DAILINK_NAME(name) (__CHIPSET__#name)
-#define DEFAULT_MCLK_RATE 9600000
-#define NATIVE_MCLK_RATE 11289600
-
#define WCD_MBHC_DEF_RLOADS 5
#define WCN_CDC_SLIM_RX_CH_MAX 2
@@ -439,7 +436,7 @@ static int int_mi2s_ch_put(struct snd_kcontrol *kcontrol,
static const struct snd_soc_dapm_widget msm_int_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY_S("INT_MCLK0", -1, SND_SOC_NOPM, 0, 0,
- msm_int_mclk0_event, SND_SOC_DAPM_POST_PMD),
+ msm_int_mclk0_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MIC("Handset Mic", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Secondary Mic", NULL),
@@ -730,6 +727,8 @@ static int msm_int_enable_dig_cdc_clk(struct snd_soc_codec *codec,
cancel_delayed_work_sync(&pdata->disable_int_mclk0_work);
mutex_lock(&pdata->cdc_int_mclk0_mutex);
if (atomic_read(&pdata->int_mclk0_enabled) == true) {
+ pdata->digital_cdc_core_clk.clk_freq_in_hz =
+ DEFAULT_MCLK_RATE;
pdata->digital_cdc_core_clk.enable = 0;
ret = afe_set_lpass_clock_v2(
AFE_PORT_ID_INT0_MI2S_RX,
@@ -738,6 +737,7 @@ static int msm_int_enable_dig_cdc_clk(struct snd_soc_codec *codec,
pr_err("%s: failed to disable CCLK\n",
__func__);
atomic_set(&pdata->int_mclk0_enabled, false);
+ atomic_set(&pdata->int_mclk0_rsc_ref, 0);
}
mutex_unlock(&pdata->cdc_int_mclk0_mutex);
}
@@ -959,6 +959,16 @@ static int msm_int_mclk0_event(struct snd_soc_dapm_widget *w,
pdata = snd_soc_card_get_drvdata(codec->component.card);
pr_debug("%s: event = %d\n", __func__, event);
switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = msm_cdc_pinctrl_select_active_state(pdata->pdm_gpio_p);
+ if (ret < 0) {
+ pr_err("%s: gpio set cannot be activated %s\n",
+ __func__, "int_pdm");
+ return ret;
+ }
+ msm_int_enable_dig_cdc_clk(codec, 1, true);
+ msm_anlg_cdc_mclk_enable(codec, 1, true);
+ break;
case SND_SOC_DAPM_POST_PMD:
pr_debug("%s: mclk_res_ref = %d\n",
__func__, atomic_read(&pdata->int_mclk0_rsc_ref));
@@ -968,12 +978,10 @@ static int msm_int_mclk0_event(struct snd_soc_dapm_widget *w,
__func__, "int_pdm");
return ret;
}
- if (atomic_read(&pdata->int_mclk0_rsc_ref) == 0) {
- pr_debug("%s: disabling MCLK\n", __func__);
- /* disable the codec mclk config*/
- msm_anlg_cdc_mclk_enable(codec, 0, true);
- msm_int_enable_dig_cdc_clk(codec, 0, true);
- }
+ pr_debug("%s: disabling MCLK\n", __func__);
+ /* disable the codec mclk config*/
+ msm_anlg_cdc_mclk_enable(codec, 0, true);
+ msm_int_enable_dig_cdc_clk(codec, 0, true);
break;
default:
pr_err("%s: invalid DAPM event %d\n", __func__, event);
@@ -1158,19 +1166,6 @@ static int msm_int_mi2s_snd_startup(struct snd_pcm_substream *substream)
__func__, ret);
return ret;
}
- ret = msm_int_enable_dig_cdc_clk(codec, 1, true);
- if (ret < 0) {
- pr_err("failed to enable mclk\n");
- return ret;
- }
- /* Enable the codec mclk config */
- ret = msm_cdc_pinctrl_select_active_state(pdata->pdm_gpio_p);
- if (ret < 0) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "int_pdm");
- return ret;
- }
- msm_anlg_cdc_mclk_enable(codec, 1, true);
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
pr_err("%s: set fmt cpu dai failed; ret=%d\n", __func__, ret);
@@ -1181,9 +1176,6 @@ static int msm_int_mi2s_snd_startup(struct snd_pcm_substream *substream)
static void msm_int_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
{
int ret;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_card *card = rtd->card;
- struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
pr_debug("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -1192,12 +1184,6 @@ static void msm_int_mi2s_snd_shutdown(struct snd_pcm_substream *substream)
if (ret < 0)
pr_err("%s:clock disable failed; ret=%d\n", __func__,
ret);
- if (atomic_read(&pdata->int_mclk0_rsc_ref) > 0) {
- atomic_dec(&pdata->int_mclk0_rsc_ref);
- pr_debug("%s: decrementing mclk_res_ref %d\n",
- __func__,
- atomic_read(&pdata->int_mclk0_rsc_ref));
- }
}
static void *def_msm_int_wcd_mbhc_cal(void)
@@ -2975,6 +2961,8 @@ static void msm_disable_int_mclk0(struct work_struct *work)
&& atomic_read(&pdata->int_mclk0_rsc_ref) == 0) {
pr_debug("Disable the mclk\n");
pdata->digital_cdc_core_clk.enable = 0;
+ pdata->digital_cdc_core_clk.clk_freq_in_hz =
+ DEFAULT_MCLK_RATE;
ret = afe_set_lpass_clock_v2(
AFE_PORT_ID_INT0_MI2S_RX,
&pdata->digital_cdc_core_clk);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 362446c36c9e..e00dfbec22c5 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1049,10 +1049,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
return -ENOMEM;
ret = snd_ctl_add(card, kctrl);
- if (ret < 0) {
- snd_ctl_free_one(kctrl);
+ if (ret < 0)
return ret;
- }
cfg->update = update;
cfg->card = card;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 2cd09ceba5e9..2899797610e8 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_SOURCE))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
return cs;
}
@@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_SELECTOR))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
+ if (cs->bLength < 5 + cs->bNrInPins)
+ return NULL;
return cs;
+ }
}
return NULL;
@@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_MULTIPLIER))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
return cs;
}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 413824566102..9d864648c901 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1541,6 +1541,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
__u8 *bmaControls;
if (state->mixer->protocol == UAC_VERSION_1) {
+ if (hdr->bLength < 7) {
+ usb_audio_err(state->chip,
+ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+ unitid);
+ return -EINVAL;
+ }
csize = hdr->bControlSize;
if (!csize) {
usb_audio_dbg(state->chip,
@@ -1558,6 +1564,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
}
} else if (state->mixer->protocol == UAC_VERSION_2) {
struct uac2_feature_unit_descriptor *ftr = _ftr;
+ if (hdr->bLength < 6) {
+ usb_audio_err(state->chip,
+ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+ unitid);
+ return -EINVAL;
+ }
csize = 4;
channels = (hdr->bLength - 6) / 4 - 1;
bmaControls = ftr->bmaControls;
@@ -2277,7 +2289,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
const struct usbmix_name_map *map;
char **namelist;
- if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
+ if (desc->bLength < 5 || !desc->bNrInPins ||
+ desc->bLength < 5 + desc->bNrInPins) {
usb_audio_err(state->chip,
"invalid SELECTOR UNIT descriptor %d\n", unitid);
return -EINVAL;
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index 928e110179cb..34faecf774ae 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,3 +1,5 @@
libperf-y += Context.o
-CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default
+CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
+CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
+CFLAGS_Context.o += -Wno-switch-default -Wno-shadow
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 638875a0960a..79547c225c14 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf)
snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
d, d, perf, vcnt, v);
- return system(cmd);
+ return system(cmd) ? TEST_FAIL : TEST_OK;
}
int test__attr(void)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 881bbb5e7912..6e825dbaddea 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -292,10 +292,11 @@ __add_event(struct list_head *list, int *idx,
event_attr_init(attr);
- evsel = perf_evsel__new_idx(attr, (*idx)++);
+ evsel = perf_evsel__new_idx(attr, *idx);
if (!evsel)
return NULL;
+ (*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index c4366dc74e01..856a1f327b3f 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -48,8 +48,16 @@ echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
+if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then
+ echo "$0: empty filename should not succeed" >&2
+ exit 1
+fi
+
# Request a firmware that doesn't exist, it should fail.
-echo -n "nope-$NAME" >"$DIR"/trigger_request
+if echo -n "nope-$NAME" >"$DIR"/trigger_request 2> /dev/null; then
+ echo "$0: firmware shouldn't have loaded" >&2
+ exit 1
+fi
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not expected to match" >&2
exit 1
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
index b9983f8e09f6..01c626a1f226 100755
--- a/tools/testing/selftests/firmware/fw_userhelper.sh
+++ b/tools/testing/selftests/firmware/fw_userhelper.sh
@@ -64,9 +64,33 @@ trap "test_finish" EXIT
echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
+DEVPATH="$DIR"/"nope-$NAME"/loading
+
# Test failure when doing nothing (timeout works).
-echo 1 >/sys/class/firmware/timeout
-echo -n "$NAME" >"$DIR"/trigger_request
+echo -n 2 >/sys/class/firmware/timeout
+echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
+
+# Give the kernel some time to load the loading file, must be less
+# than the timeout above.
+sleep 1
+if [ ! -f $DEVPATH ]; then
+ echo "$0: fallback mechanism immediately cancelled"
+ echo ""
+ echo "The file never appeared: $DEVPATH"
+ echo ""
+ echo "This might be a distribution udev rule setup by your distribution"
+ echo "to immediately cancel all fallback requests, this must be"
+ echo "removed before running these tests. To confirm look for"
+ echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
+ echo "and see if you have something like this:"
+ echo ""
+ echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
+ echo ""
+ echo "If you do remove this file or comment out this line before"
+ echo "proceeding with these tests."
+ exit 1
+fi
+
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not expected to match" >&2
exit 1
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 923e59eb82c7..412b845412d2 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -351,9 +351,24 @@ static void do_simple_tests(void)
install_invalid(&desc, false);
desc.seg_not_present = 0;
- desc.read_exec_only = 0;
desc.seg_32bit = 1;
+ desc.read_exec_only = 0;
+ desc.limit = 0xfffff;
+
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
+
+ desc.limit_in_pages = 1;
+
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
+ desc.read_exec_only = 1;
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
+ desc.contents = 1;
+ desc.read_exec_only = 0;
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+ desc.read_exec_only = 1;
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+
+ desc.limit = 0;
install_invalid(&desc, true);
}
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a7b9022b5c8f..7f38db2a46c8 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -84,9 +84,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
struct kvm_vcpu *vcpu;
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
- vcpu->arch.timer_cpu.armed = false;
-
- WARN_ON(!kvm_timer_should_fire(vcpu));
/*
* If the vcpu is blocked we want to wake it up so that it will see