Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/acpica/evxfevnt.c18
-rw-r--r--drivers/acpi/acpica/psobject.c14
-rw-r--r--drivers/acpi/numa.c10
-rw-r--r--drivers/acpi/pci_irq.c3
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c50
-rw-r--r--drivers/acpi/processor_driver.c10
-rw-r--r--drivers/acpi/processor_throttling.c62
-rw-r--r--drivers/acpi/sbshc.c4
-rw-r--r--drivers/acpi/video_detect.c9
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/android/binder.c41
-rw-r--r--drivers/ata/ahci.c41
-rw-r--r--drivers/ata/libahci_platform.c5
-rw-r--r--drivers/ata/libata-core.c29
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/atm/zatm.c3
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/bluetooth/btsdio.c9
-rw-r--r--drivers/bluetooth/btusb.c19
-rw-r--r--drivers/bluetooth/hci_qca.c3
-rw-r--r--drivers/bus/brcmstb_gisb.c42
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/adsprpc.c33
-rw-r--r--drivers/char/adsprpc_compat.c109
-rw-r--r--drivers/char/adsprpc_shared.h18
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/diag/diag_dci.c47
-rw-r--r--drivers/char/diag/diag_memorydevice.c65
-rw-r--r--drivers/char/diag/diag_memorydevice.h5
-rw-r--r--drivers/char/diag/diag_mux.c4
-rw-r--r--drivers/char/diag/diagchar.h5
-rw-r--r--drivers/char/diag/diagchar_core.c109
-rw-r--r--drivers/char/diag/diagfwd.c151
-rw-r--r--drivers/char/diag/diagfwd_peripheral.c22
-rw-r--r--drivers/char/hw_random/exynos-rng.c10
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/char/random.c12
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c4
-rw-r--r--drivers/char/tpm/tpm-interface.c5
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c5
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c8
-rw-r--r--drivers/char/tpm/tpm_tis.c5
-rw-r--r--drivers/char/virtio_console.c49
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c12
-rw-r--r--drivers/clk/bcm/clk-ns2.c2
-rw-r--r--drivers/clk/clk-conf.c2
-rw-r--r--drivers/clk/clk-scpi.c6
-rw-r--r--drivers/clk/clk-si5351.c2
-rw-r--r--drivers/clk/msm/clock-dummy.c55
-rw-r--r--drivers/clk/msm/clock-osm.c4
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c6
-rw-r--r--drivers/clk/msm/virtclk-front-8996.c31
-rw-r--r--drivers/clk/msm/virtclk-front.c29
-rw-r--r--drivers/clk/msm/virtclk-front.h2
-rw-r--r--drivers/clk/mvebu/armada-38x.c15
-rw-r--r--drivers/clk/qcom/clk-cpu-osm.c4
-rw-r--r--drivers/clk/qcom/clk-rcg2.c20
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/qcom/gpucc-sdm660.c3
-rw-r--r--drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c6
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c6
-rw-r--r--drivers/cpufreq/Kconfig9
-rw-r--r--drivers/cpufreq/Makefile5
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq_times.c461
-rw-r--r--drivers/cpufreq/intel_pstate.c34
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c2
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c8
-rw-r--r--drivers/cpufreq/sh-cpufreq.c45
-rw-r--r--drivers/cpuidle/coupled.c1
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c2
-rw-r--r--drivers/cpuidle/dt_idle_states.c4
-rw-r--r--drivers/cpuidle/lpm-levels-of.c31
-rw-r--r--drivers/cpuidle/lpm-levels.c3
-rw-r--r--drivers/crypto/caam/ctrl.c8
-rw-r--r--drivers/crypto/msm/qcedev.c22
-rw-r--r--drivers/crypto/msm/qcrypto.c4
-rw-r--r--drivers/crypto/s5p-sss.c13
-rw-r--r--drivers/devfreq/devfreq.c2
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/dma-jz4740.c4
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/imx-sdma.c40
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/ti-dma-crossbar.c10
-rw-r--r--drivers/dma/zx296702_dma.c2
-rw-r--r--drivers/edac/mv64x60_edac.c2
-rw-r--r--drivers/edac/octeon_edac-lmc.c1
-rw-r--r--drivers/esoc/esoc-mdm-4x.c11
-rw-r--r--drivers/esoc/esoc-mdm-pon.c4
-rw-r--r--drivers/gpio/gpio-intel-mid.c2
-rw-r--r--drivers/gpio/gpio-rcar.c46
-rw-r--r--drivers/gpio/gpio-xgene.c13
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c10
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c25
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/drm_irq.c14
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c20
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c10
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/msm-hyp/msm_drv_hyp.c26
-rw-r--r--drivers/gpu/drm/msm/dba_bridge.c18
-rw-r--r--drivers/gpu/drm/msm/dba_bridge.h3
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c46
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h5
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c4
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.c139
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.h15
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_drm.c4
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.c12
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.h9
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c99
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c100
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c18
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c7
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c38
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h6
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c73
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c4
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c3
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h4
-rw-r--r--drivers/gpu/drm/msm/msm_prop.c9
-rw-r--r--drivers/gpu/drm/msm/msm_smmu.c70
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c32
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.h6
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_irq.c18
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c126
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.c20
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c18
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.c179
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog.c4
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_cdm.c1
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.c100
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.h29
-rw-r--r--drivers/gpu/drm/msm/sde/sde_irq.c26
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c58
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.c114
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.h6
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.c248
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.h12
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.c417
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.h89
-rw-r--r--drivers/gpu/drm/msm/sde/sde_trace.h18
-rw-r--r--drivers/gpu/drm/msm/sde_dbg.c200
-rw-r--r--drivers/gpu/drm/msm/sde_dbg.h35
-rw-r--r--drivers/gpu/drm/msm/sde_dbg_evtlog.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c9
-rw-r--r--drivers/gpu/drm/radeon/cik.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c105
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c56
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h8
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c5
-rw-r--r--drivers/gpu/msm/adreno-gpulist.h18
-rw-r--r--drivers/gpu/msm/adreno.c2
-rw-r--r--drivers/gpu/msm/adreno.h4
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c40
-rw-r--r--drivers/gpu/msm/adreno_a5xx_snapshot.c119
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c12
-rw-r--r--drivers/gpu/msm/adreno_dispatch.h5
-rw-r--r--drivers/gpu/msm/kgsl.c28
-rw-r--r--drivers/gpu/msm/kgsl.h5
-rw-r--r--drivers/gpu/msm/kgsl_device.h6
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c6
-rw-r--r--drivers/hid/hid-core.c25
-rw-r--r--drivers/hid/hid-elo.c6
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c23
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-rmi.c4
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c13
-rw-r--r--drivers/hsi/clients/ssi_protocol.c5
-rw-r--r--drivers/hv/hv.c5
-rw-r--r--drivers/hwmon/ina2xx.c90
-rw-r--r--drivers/hwmon/pmbus/adm1275.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c13
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c2
-rw-r--r--drivers/i2c/busses/i2c-msm-v2.c124
-rw-r--r--drivers/i2c/busses/i2c-scmi.c4
-rw-r--r--drivers/i2c/i2c-boardinfo.c4
-rw-r--r--drivers/idle/Kconfig1
-rw-r--r--drivers/iio/accel/st_accel_core.c9
-rw-r--r--drivers/iio/adc/axp288_adc.c2
-rw-r--r--drivers/iio/adc/hi8435.c27
-rw-r--r--drivers/iio/imu/adis_trigger.c7
-rw-r--r--drivers/iio/imu/inv_mpu/Kconfig63
-rw-r--r--drivers/iio/imu/inv_mpu/Makefile61
-rw-r--r--drivers/iio/imu/inv_mpu/README117
-rw-r--r--drivers/iio/imu/inv_mpu/iam20680/inv_mpu_core_20680.c1072
-rw-r--r--drivers/iio/imu/inv_mpu/iam20680/inv_mpu_iio_reg_20680.h236
-rw-r--r--drivers/iio/imu/inv_mpu/iam20680/inv_mpu_init_20680.c258
-rw-r--r--drivers/iio/imu/inv_mpu/iam20680/inv_mpu_parsing_20680.c421
-rw-r--r--drivers/iio/imu/inv_mpu/iam20680/inv_mpu_selftest_20680.c752
-rw-r--r--drivers/iio/imu/inv_mpu/iam20680/inv_mpu_setup_20680.c466
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_common.c988
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_dts.c343
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_dts.h25
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_i2c.c556
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_iio.h1138
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_ring.c643
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_spi.c410
-rw-r--r--drivers/iio/imu/inv_mpu/inv_mpu_timestamp.c280
-rw-r--r--drivers/iio/imu/inv_mpu/inv_test/Kconfig13
-rw-r--r--drivers/iio/imu/inv_mpu/inv_test/Makefile6
-rw-r--r--drivers/iio/imu/inv_mpu/inv_test/inv_counters.c159
-rw-r--r--drivers/iio/imu/inv_mpu/inv_test/inv_counters.h76
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c10
-rw-r--r--drivers/infiniband/core/addr.c16
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/ucma.c76
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c19
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c22
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c41
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c16
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c18
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c6
-rw-r--r--drivers/input/input-leds.c8
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/goldfish_rotary.c200
-rw-r--r--drivers/input/keyboard/matrix_keypad.c4
-rw-r--r--drivers/input/keyboard/qt1070.c9
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c23
-rw-r--r--drivers/input/misc/drv260x.c2
-rw-r--r--drivers/input/misc/hbtp_input.c36
-rw-r--r--drivers/input/misc/keychord.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c7
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c9
-rw-r--r--drivers/input/mouse/elantech.c11
-rw-r--r--drivers/input/mousedev.c62
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h24
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c2
-rw-r--r--drivers/input/touchscreen/atmel_maxtouch_ts.c41
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c9
-rw-r--r--drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c18
-rw-r--r--drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c4
-rw-r--r--drivers/input/touchscreen/tsc2007.c8
-rw-r--r--drivers/iommu/arm-smmu.c11
-rw-r--r--drivers/iommu/intel-svm.c10
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/iommu/omap-iommu.c21
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c9
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c123
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c136
-rw-r--r--drivers/irqchip/irq-mips-gic.c22
-rw-r--r--drivers/isdn/hardware/eicon/message.c16
-rw-r--r--drivers/isdn/icn/icn.c2
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/isdn/sc/init.c7
-rw-r--r--drivers/leds/led-triggers.c12
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-qpnp-flash-v2.c62
-rw-r--r--drivers/md/bcache/alloc.c19
-rw-r--r--drivers/md/bcache/super.c17
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-io.c1
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-verity-target.c65
-rw-r--r--drivers/md/dm-verity.h1
-rw-r--r--drivers/md/dm.c3
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c10
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c30
-rw-r--r--drivers/media/common/b2c2/flexcop-fe-tuner.c4
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c23
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c7
-rw-r--r--drivers/media/dvb-frontends/si2168.c3
-rw-r--r--drivers/media/dvb-frontends/ts2020.c4
-rw-r--r--drivers/media/i2c/adv7481.c266
-rw-r--r--drivers/media/i2c/adv7481_reg.h13
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c36
-rw-r--r--drivers/media/i2c/s5k6aa.c5
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c2
-rw-r--r--drivers/media/i2c/tc358743.c46
-rw-r--r--drivers/media/pci/bt8xx/bt878.c3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c11
-rw-r--r--drivers/media/platform/msm/ais/Makefile1
-rw-r--r--drivers/media/platform/msm/ais/camera/camera.c4
-rw-r--r--drivers/media/platform/msm/ais/common/Makefile2
-rw-r--r--drivers/media/platform/msm/ais/common/cam_hw_ops.c5
-rw-r--r--drivers/media/platform/msm/ais/common/cam_hw_ops.h13
-rw-r--r--drivers/media/platform/msm/ais/common/cam_soc_api.c7
-rw-r--r--drivers/media/platform/msm/ais/common/msm_camera_diag_util.c364
-rw-r--r--drivers/media/platform/msm/ais/common/msm_camera_diag_util.h47
-rw-r--r--drivers/media/platform/msm/ais/common/msm_camera_io_util.c5
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_buf_mgr.c48
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_buf_mgr.h15
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp.h1
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp47.c33
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c52
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h1
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c12
-rw-r--r--drivers/media/platform/msm/ais/isp/msm_isp_util.c73
-rw-r--r--drivers/media/platform/msm/ais/ispif/msm_ispif.c17
-rw-r--r--drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c13
-rw-r--r--drivers/media/platform/msm/ais/msm.c10
-rw-r--r--drivers/media/platform/msm/ais/msm_ais_diag/Makefile4
-rw-r--r--drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.c267
-rw-r--r--drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.h57
-rw-r--r--drivers/media/platform/msm/ais/msm_ais_mgr/Makefile1
-rw-r--r--drivers/media/platform/msm/ais/msm_ais_mgr/msm_ais_mgr.c57
-rw-r--r--drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c63
-rw-r--r--drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h9
-rw-r--r--drivers/media/platform/msm/ais/sensor/csid/msm_csid.c194
-rw-r--r--drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c17
-rw-r--r--drivers/media/platform/msm/ais/sensor/msm_sensor.c115
-rw-r--r--drivers/media/platform/msm/ais/sensor/msm_sensor.h4
-rw-r--r--drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c12
-rw-r--r--drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h5
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c15
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c33
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c17
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c25
-rw-r--r--drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c37
-rw-r--r--drivers/media/platform/msm/camera_v2/msm_sd.h5
-rw-r--r--drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c64
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c5
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h5
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c26
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c7
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c2
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c11
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.h2
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_internal.h2
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c4
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c4
-rw-r--r--drivers/media/rc/mceusb.c9
-rw-r--r--drivers/media/tuners/r820t.c13
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c39
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c1
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/go7007/Kconfig2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c1038
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c29
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/palmas.c14
-rw-r--r--drivers/misc/enclosure.c7
-rw-r--r--drivers/misc/mei/main.c1
-rw-r--r--drivers/misc/profiler.c3
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_multi_aac.c4
-rw-r--r--drivers/misc/qseecom.c189
-rw-r--r--drivers/misc/uid_sys_stats.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/card/block.c47
-rw-r--r--drivers/mmc/card/queue.c5
-rw-r--r--drivers/mmc/core/bus.c3
-rw-r--r--drivers/mmc/core/core.c64
-rw-r--r--drivers/mmc/core/host.c5
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sd.c20
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c4
-rw-r--r--drivers/mmc/host/sdhci-msm.c43
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c14
-rw-r--r--drivers/mtd/chips/Kconfig4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/chips/jedec_probe.c2
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/ichxrom.c10
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c13
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c5
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c84
-rw-r--r--drivers/mtd/nand/nand_base.c14
-rw-r--r--drivers/mtd/nand/sh_flctl.c5
-rw-r--r--drivers/mtd/nand/sunxi_nand.c8
-rw-r--r--drivers/mtd/ubi/block.c44
-rw-r--r--drivers/mtd/ubi/build.c11
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c1
-rw-r--r--drivers/mtd/ubi/vmt.c15
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c98
-rw-r--r--drivers/net/can/cc770/cc770.c100
-rw-r--r--drivers/net/can/cc770/cc770.h2
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/spi/Kconfig6
-rw-r--r--drivers/net/can/spi/Makefile1
-rw-r--r--drivers/net/can/spi/k61.c3
-rw-r--r--drivers/net/can/spi/qti-can.c1454
-rw-r--r--drivers/net/can/spi/rh850.c9
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h1
-rw-r--r--drivers/net/ethernet/arc/emac_main.c53
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c33
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c23
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c9
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hp/hp100.c20
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c26
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c23
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c12
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c11
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c18
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c10
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c11
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c19
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c5
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/ppp/ppp_generic.c9
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/slip/slhc.c5
-rw-r--r--drivers/net/team/team.c42
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/cdc_ether.c16
-rw-r--r--drivers/net/usb/cdc_ncm.c17
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c13
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vrf.c8
-rw-r--r--drivers/net/vxlan.c7
-rw-r--r--drivers/net/wan/hdlc_ppp.c5
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c107
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h19
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c65
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c163
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h7
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c19
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c24
-rw-r--r--drivers/net/wireless/cnss/cnss_pci.c25
-rw-r--r--drivers/net/wireless/cnss2/Kconfig4
-rw-r--r--drivers/net/wireless/cnss2/main.c25
-rw-r--r--drivers/net/wireless/cnss2/main.h20
-rw-r--r--drivers/net/wireless/cnss2/pci.c197
-rw-r--r--drivers/net/wireless/cnss2/pci.h15
-rw-r--r--drivers/net/wireless/cnss2/power.c6
-rw-r--r--drivers/net/wireless/cnss2/qmi.c129
-rw-r--r--drivers/net/wireless/cnss2/wlan_firmware_service_v01.c544
-rw-r--r--drivers/net/wireless/cnss2/wlan_firmware_service_v01.h107
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c9
-rw-r--r--drivers/net/wireless/cw1200/pm.h9
-rw-r--r--drivers/net/wireless/cw1200/wsm.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c17
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c10
-rw-r--r--drivers/net/wireless/ray_cs.c7
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c3
-rw-r--r--drivers/net/wireless/wcnss/wcnss_vreg.c51
-rw-r--r--drivers/net/wireless/wcnss/wcnss_wlan.c8
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c2
-rw-r--r--drivers/nfc/nfcmrvl/spi.c5
-rw-r--r--drivers/nvme/host/pci.c13
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/pci/host/pci-keystone.c9
-rw-r--r--drivers/pci/host/pci-msm.c22
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c23
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/perf/arm_pmu.c12
-rw-r--r--drivers/pinctrl/core.c24
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c6
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c8
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c2
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c72
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.h8
-rw-r--r--drivers/platform/goldfish/goldfish_pipe_v2.c654
-rw-r--r--drivers/platform/goldfish/pdev_bus.c8
-rw-r--r--drivers/platform/msm/gsi/gsi.c17
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c58
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c18
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_nat.c22
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_rt.c65
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c68
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_nat.c8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c64
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c4
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c2
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/bq27xxx_battery.c6
-rw-r--r--drivers/power/pda_power.c49
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/qcom/lpm-stats.c3
-rw-r--r--drivers/power/supply/qcom/battery.c408
-rw-r--r--drivers/power/supply/qcom/qpnp-fg-gen3.c14
-rw-r--r--drivers/power/supply/qcom/qpnp-smb2.c13
-rw-r--r--drivers/power/supply/qcom/smb-lib.c110
-rw-r--r--drivers/power/supply/qcom/smb-lib.h5
-rw-r--r--drivers/powercap/powercap_sys.c1
-rw-r--r--drivers/ptp/ptp_clock.c18
-rw-r--r--drivers/pwm/pwm-tegra.c7
-rw-r--r--drivers/regulator/anatop-regulator.c5
-rw-r--r--drivers/regulator/cprh-kbss-regulator.c5
-rw-r--r--drivers/regulator/kryo-regulator.c5
-rw-r--r--drivers/rtc/Kconfig8
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c9
-rw-r--r--drivers/rtc/rtc-cmos.c17
-rw-r--r--drivers/rtc/rtc-ds1374.c10
-rw-r--r--drivers/rtc/rtc-goldfish.c237
-rw-r--r--drivers/rtc/rtc-opal.c22
-rw-r--r--drivers/rtc/rtc-snvs.c2
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c16
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/qdio_main.c42
-rw-r--r--drivers/s390/cio/qdio_setup.c12
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c54
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c23
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c14
-rw-r--r--drivers/scsi/advansys.c24
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c10
-rw-r--r--drivers/scsi/csiostor/csio_hw.c5
-rw-r--r--drivers/scsi/dpt_i2o.c3
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/g_NCR5380.c5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/initio.c16
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/libiscsi.c24
-rw-r--r--drivers/scsi/libsas/sas_expander.c4
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c3
-rw-r--r--drivers/scsi/mac_esp.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c28
-rw-r--r--drivers/scsi/mvumi.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c9
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/sd.c82
-rw-r--r--drivers/scsi/ses.c1
-rw-r--r--drivers/scsi/sg.c42
-rw-r--r--drivers/scsi/sim710.c3
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c6
-rw-r--r--drivers/scsi/virtio_scsi.c25
-rw-r--r--drivers/slimbus/slimbus.c5
-rw-r--r--drivers/soc/qcom/Kconfig26
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/boot_marker.c60
-rw-r--r--drivers/soc/qcom/boot_stats.c18
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c35
-rw-r--r--drivers/soc/qcom/glink_ssr.c3
-rw-r--r--drivers/soc/qcom/hab/Kconfig4
-rw-r--r--drivers/soc/qcom/hab/Makefile22
-rw-r--r--drivers/soc/qcom/hab/ghs_comm.c141
-rw-r--r--drivers/soc/qcom/hab/hab.c651
-rw-r--r--drivers/soc/qcom/hab/hab.h182
-rw-r--r--drivers/soc/qcom/hab/hab_ghs.c217
-rw-r--r--drivers/soc/qcom/hab/hab_ghs.h30
-rw-r--r--drivers/soc/qcom/hab/hab_mem_linux.c509
-rw-r--r--drivers/soc/qcom/hab/hab_mimex.c149
-rw-r--r--drivers/soc/qcom/hab/hab_msg.c163
-rw-r--r--drivers/soc/qcom/hab/hab_open.c207
-rw-r--r--drivers/soc/qcom/hab/hab_parser.c29
-rw-r--r--drivers/soc/qcom/hab/hab_pchan.c29
-rw-r--r--drivers/soc/qcom/hab/hab_qvm.c62
-rw-r--r--drivers/soc/qcom/hab/hab_qvm.h3
-rw-r--r--drivers/soc/qcom/hab/hab_vchan.c182
-rw-r--r--drivers/soc/qcom/hab/khab.c49
-rw-r--r--drivers/soc/qcom/hab/khab_test.c263
-rw-r--r--drivers/soc/qcom/hab/khab_test.h18
-rw-r--r--drivers/soc/qcom/hab/qvm_comm.c13
-rw-r--r--drivers/soc/qcom/icnss.c5
-rw-r--r--drivers/soc/qcom/ipc_router_mhi_xprt.c6
-rw-r--r--drivers/soc/qcom/ipc_router_smd_xprt.c7
-rw-r--r--drivers/soc/qcom/msm_performance.c19
-rw-r--r--drivers/soc/qcom/pasr.c95
-rw-r--r--drivers/soc/qcom/qdsp6v2/Makefile2
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr.c54
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_tal.c36
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_v2.c4
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_vm.c22
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c1170
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio_anc.c354
-rw-r--r--drivers/soc/qcom/qdsp6v2/lpass_resource_mgr.c552
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion.c7
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c32
-rw-r--r--drivers/soc/qcom/qdsp6v2/sdsp-anc.c801
-rw-r--r--drivers/soc/qcom/qdsp6v2/voice_svc.c7
-rw-r--r--drivers/soc/qcom/rpm_stats.c2
-rw-r--r--drivers/soc/qcom/scm_qcpe.c413
-rw-r--r--drivers/soc/qcom/subsystem_notif_virt.c163
-rw-r--r--drivers/soc/qcom/subsystem_restart.c14
-rw-r--r--drivers/soc/qcom/tracer_pkt.c4
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dw-mmio.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c9
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi_qsd.c10
-rw-r--r--drivers/spmi/spmi-pmic-arb.c21
-rw-r--r--drivers/spmi/virtspmi-pmic-arb.c561
-rw-r--r--drivers/ssb/main.c7
-rw-r--r--drivers/staging/android/Kconfig9
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/TODO9
-rw-r--r--drivers/staging/android/ashmem.c32
-rw-r--r--drivers/staging/android/ion/ion.c9
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c5
-rw-r--r--drivers/staging/android/ion/msm/msm_ion.c4
-rw-r--r--drivers/staging/android/uapi/vsoc_shm.h303
-rw-r--r--drivers/staging/android/vsoc.c1165
-rw-r--r--drivers/staging/comedi/drivers.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c2
-rw-r--r--drivers/staging/goldfish/Kconfig7
-rw-r--r--drivers/staging/goldfish/Makefile1
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c442
-rw-r--r--drivers/staging/goldfish/goldfish_nand_reg.h76
-rw-r--r--drivers/staging/iio/adc/ad7192.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c2
-rw-r--r--drivers/staging/speakup/kobjects.c8
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c7
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c8
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig2
-rw-r--r--drivers/staging/wilc1000/host_interface.c2
-rw-r--r--drivers/staging/wilc1000/linux_mon.c2
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h1
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/target/target_core_file.c23
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/call.c220
-rw-r--r--drivers/tee/optee/core.c163
-rw-r--r--drivers/tee/optee/optee_msg.h38
-rw-r--r--drivers/tee/optee/optee_private.h72
-rw-r--r--drivers/tee/optee/optee_smc.h7
-rw-r--r--drivers/tee/optee/rpc.c81
-rw-r--r--drivers/tee/optee/shm_pool.c75
-rw-r--r--drivers/tee/optee/shm_pool.h23
-rw-r--r--drivers/tee/optee/supp.c375
-rw-r--r--drivers/tee/tee_core.c113
-rw-r--r--drivers/tee/tee_private.h60
-rw-r--r--drivers/tee/tee_shm.c230
-rw-r--r--drivers/tee/tee_shm_pool.c165
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/imx_thermal.c6
-rw-r--r--drivers/thermal/msm-tsens.c10
-rw-r--r--drivers/thermal/msm_thermal.c11
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/spear_thermal.c6
-rw-r--r--drivers/thunderbolt/nhi.c1
-rw-r--r--drivers/tty/Kconfig9
-rw-r--r--drivers/tty/goldfish.c239
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/n_gsm.c40
-rw-r--r--drivers/tty/n_tty.c6
-rw-r--r--drivers/tty/serial/8250/8250_omap.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c11
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/atmel_serial.c1
-rw-r--r--drivers/tty/serial/msm_serial_hs.c17
-rw-r--r--drivers/tty/serial/sccnxp.c15
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c5
-rw-r--r--drivers/tty/serial/sh-sci.c18
-rw-r--r--drivers/tty/tty_io.c16
-rw-r--r--drivers/tty/tty_ldisc.c16
-rw-r--r--drivers/tty/vt/vt.c14
-rw-r--r--drivers/uio/msm_sharedmem/msm_sharedmem.c32
-rw-r--r--drivers/usb/chipidea/core.c29
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/driver.c5
-rw-r--r--drivers/usb/core/generic.c9
-rw-r--r--drivers/usb/core/hcd.c14
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/core/usb.c17
-rw-r--r--drivers/usb/dwc2/hcd.c6
-rw-r--r--drivers/usb/dwc3/core.c20
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c4
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c102
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/function/f_accessory.c6
-rw-r--r--drivers/usb/gadget/function/f_audio_source.c29
-rw-r--r--drivers/usb/gadget/function/f_cdev.c3
-rw-r--r--drivers/usb/gadget/function/f_fs.c261
-rw-r--r--drivers/usb/gadget/function/f_gsi.c60
-rw-r--r--drivers/usb/gadget/function/f_hid.c24
-rw-r--r--drivers/usb/gadget/function/f_midi.c6
-rw-r--r--drivers/usb/gadget/function/f_qdss.c4
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c6
-rw-r--r--drivers/usb/gadget/function/g_zero.h1
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c16
-rw-r--r--drivers/usb/gadget/u_f.c6
-rw-r--r--drivers/usb/gadget/u_f.h26
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c20
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ohci-q.c17
-rw-r--r--drivers/usb/host/xhci-plat.c46
-rw-r--r--drivers/usb/host/xhci.c20
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/ehset.c304
-rw-r--r--drivers/usb/misc/ldusb.c6
-rw-r--r--drivers/usb/mon/mon_text.c124
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c14
-rw-r--r--drivers/usb/musb/musb_host.c4
-rw-r--r--drivers/usb/musb/ux500_dma.c3
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/phy/phy-msm-ssusb-qmp.c12
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/option.c448
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/serial/visor.c69
-rw-r--r--drivers/usb/storage/ene_ub6250.c11
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/usbip/stub.h2
-rw-r--r--drivers/usb/usbip/stub_dev.c46
-rw-r--r--drivers/usb/usbip/stub_main.c100
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c4
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c107
-rw-r--r--drivers/vhost/net.c1
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/video/console/dummycon.c1
-rw-r--r--drivers/video/console/vgacon.c34
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/amba-clcd.c4
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c8
-rw-r--r--drivers/video/fbdev/auo_k190x.c11
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c13
-rw-r--r--drivers/video/fbdev/goldfishfb.c96
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/mmp/core.c5
-rw-r--r--drivers/video/fbdev/msm/mdp3_ctrl.c15
-rw-r--r--drivers/video/fbdev/msm/mdss.h9
-rw-r--r--drivers/video/fbdev/msm/mdss_debug_xlog.c7
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c1
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_aux.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.c4
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c17
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.h2
-rw-r--r--drivers/video/fbdev/msm/mdss_hdcp.h5
-rw-r--r--drivers/video/fbdev/msm/mdss_hdcp_1x.c178
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_edid.c5
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_edid.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c38
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c140
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.h3
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c4
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c7
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c8
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c51
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c23
-rw-r--r--drivers/video/fbdev/msm/msm_dba/adv7533.c188
-rw-r--r--drivers/video/fbdev/sis/init301.c10
-rw-r--r--drivers/video/fbdev/sm501fb.c1
-rw-r--r--drivers/video/fbdev/sm712fb.c16
-rw-r--r--drivers/video/fbdev/udlfb.c14
-rw-r--r--drivers/video/fbdev/vfb.c17
-rw-r--r--drivers/video/fbdev/via/viafbdev.c8
-rw-r--r--drivers/video/hdmi.c51
-rw-r--r--drivers/video/msm/ba/msm_ba.c19
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c12
-rw-r--r--drivers/watchdog/imx2_wdt.c20
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/gntdev.c8
915 files changed, 34647 insertions, 6711 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index d7c1d7422e86..d563f5c13544 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_PHY) += usb/
obj-$(CONFIG_USB) += usb/
+obj-$(CONFIG_USB_SUPPORT) += usb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/
obj-$(CONFIG_OF) += usb/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 10ce48e16ebf..d830705f8a18 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_enable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
@@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
ACPI_FUNCTION_TRACE(acpi_disable_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
@@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)
ACPI_FUNCTION_TRACE(acpi_clear_event);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index e54bc2aa7a88..a05b3b79b987 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
(u32)(aml_offset +
sizeof(struct acpi_table_header)));
+ ACPI_ERROR((AE_INFO,
+ "Aborting disassembly, AML byte code is corrupt"));
+
/* Dump the context surrounding the invalid opcode */
acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
@@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
sizeof(struct acpi_table_header) -
16));
acpi_os_printf(" */\n");
+
+ /*
+ * Just abort the disassembly, cannot continue because the
+ * parser is essentially lost. The disassembler can then
+ * randomly fail because an ill-constructed parse tree
+ * can result.
+ */
+ return_ACPI_STATUS(AE_AML_BAD_OPCODE);
#endif
}
@@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
if (status == AE_CTRL_PARSE_CONTINUE) {
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* Create Op structure and append to parent's argument list */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d176e0ece470..2946e2846573 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
*/
int acpi_map_pxm_to_online_node(int pxm)
{
- int node, n, dist, min_dist;
+ int node, min_node;
node = acpi_map_pxm_to_node(pxm);
if (node == NUMA_NO_NODE)
node = 0;
+ min_node = node;
if (!node_online(node)) {
- min_dist = INT_MAX;
+ int min_dist = INT_MAX, dist, n;
+
for_each_online_node(n) {
dist = node_distance(node, n);
if (dist < min_dist) {
min_dist = dist;
- node = n;
+ min_node = n;
}
}
}
- return node;
+ return min_node;
}
EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 8a10a7ae6a8a..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -131,9 +131,6 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
quirk = &prt_quirks[i];
/* All current quirks involve link devices, not GSIs */
- if (!prt->source)
- continue;
-
if (dmi_check_system(quirk->system) &&
entry->id.segment == quirk->segment &&
entry->id.bus == quirk->bus &&
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6a082d4de12c..24a793957bc0 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -28,97 +28,97 @@ static struct pmic_table power_table[] = {
.address = 0x00,
.reg = 0x13,
.bit = 0x05,
- },
+ }, /* ALD1 */
{
.address = 0x04,
.reg = 0x13,
.bit = 0x06,
- },
+ }, /* ALD2 */
{
.address = 0x08,
.reg = 0x13,
.bit = 0x07,
- },
+ }, /* ALD3 */
{
.address = 0x0c,
.reg = 0x12,
.bit = 0x03,
- },
+ }, /* DLD1 */
{
.address = 0x10,
.reg = 0x12,
.bit = 0x04,
- },
+ }, /* DLD2 */
{
.address = 0x14,
.reg = 0x12,
.bit = 0x05,
- },
+ }, /* DLD3 */
{
.address = 0x18,
.reg = 0x12,
.bit = 0x06,
- },
+ }, /* DLD4 */
{
.address = 0x1c,
.reg = 0x12,
.bit = 0x00,
- },
+ }, /* ELD1 */
{
.address = 0x20,
.reg = 0x12,
.bit = 0x01,
- },
+ }, /* ELD2 */
{
.address = 0x24,
.reg = 0x12,
.bit = 0x02,
- },
+ }, /* ELD3 */
{
.address = 0x28,
.reg = 0x13,
.bit = 0x02,
- },
+ }, /* FLD1 */
{
.address = 0x2c,
.reg = 0x13,
.bit = 0x03,
- },
+ }, /* FLD2 */
{
.address = 0x30,
.reg = 0x13,
.bit = 0x04,
- },
+ }, /* FLD3 */
{
- .address = 0x38,
+ .address = 0x34,
.reg = 0x10,
.bit = 0x03,
- },
+ }, /* BUC1 */
{
- .address = 0x3c,
+ .address = 0x38,
.reg = 0x10,
.bit = 0x06,
- },
+ }, /* BUC2 */
{
- .address = 0x40,
+ .address = 0x3c,
.reg = 0x10,
.bit = 0x05,
- },
+ }, /* BUC3 */
{
- .address = 0x44,
+ .address = 0x40,
.reg = 0x10,
.bit = 0x04,
- },
+ }, /* BUC4 */
{
- .address = 0x48,
+ .address = 0x44,
.reg = 0x10,
.bit = 0x01,
- },
+ }, /* BUC5 */
{
- .address = 0x4c,
+ .address = 0x48,
.reg = 0x10,
.bit = 0x00
- },
+ }, /* BUC6 */
};
/* TMP0 - TMP5 are the same, all from GPADC */
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 11154a330f07..c9bf74982688 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -259,6 +259,9 @@ static int __acpi_processor_start(struct acpi_device *device)
if (ACPI_SUCCESS(status))
return 0;
+ result = -ENODEV;
+ acpi_pss_perf_exit(pr, device);
+
err_power_exit:
acpi_processor_power_exit(pr);
return result;
@@ -267,11 +270,16 @@ err_power_exit:
static int acpi_processor_start(struct device *dev)
{
struct acpi_device *device = ACPI_COMPANION(dev);
+ int ret;
if (!device)
return -ENODEV;
- return __acpi_processor_start(device);
+ /* Protect against concurrent CPU hotplug operations */
+ get_online_cpus();
+ ret = __acpi_processor_start(device);
+ put_online_cpus();
+ return ret;
}
static int acpi_processor_stop(struct device *dev)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index c72e64893d03..93d72413d844 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
#define THROTTLING_POSTCHANGE (2)
static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr,
- int state, bool force);
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force, bool direct);
static int acpi_processor_update_tsd_coord(void)
{
@@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Invalid throttling state, reset\n"));
state = 0;
- ret = acpi_processor_set_throttling(pr, state, true);
+ ret = __acpi_processor_set_throttling(pr, state, true,
+ true);
if (ret)
return ret;
}
@@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
return 0;
}
-static int acpi_processor_get_throttling(struct acpi_processor *pr)
+static long __acpi_processor_get_throttling(void *data)
{
- cpumask_var_t saved_mask;
- int ret;
+ struct acpi_processor *pr = data;
+
+ return pr->throttling.acpi_processor_get_throttling(pr);
+}
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
if (!pr)
return -EINVAL;
if (!pr->flags.throttling)
return -ENODEV;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
/*
- * Migrate task to the cpu pointed by pr.
+ * This is either called from the CPU hotplug callback of
+ * processor_driver or via the ACPI probe function. In the latter
+ * case the CPU is not guaranteed to be online. Both call sites are
+ * protected against CPU hotplug.
*/
- cpumask_copy(saved_mask, &current->cpus_allowed);
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the target pr->id CPU. Exit */
- free_cpumask_var(saved_mask);
+ if (!cpu_online(pr->id))
return -ENODEV;
- }
- ret = pr->throttling.acpi_processor_get_throttling(pr);
- /* restore the previous state */
- set_cpus_allowed_ptr(current, saved_mask);
- free_cpumask_var(saved_mask);
- return ret;
+ return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
}
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data)
arg->target_state, arg->force);
}
-int acpi_processor_set_throttling(struct acpi_processor *pr,
- int state, bool force)
+static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+{
+ if (direct)
+ return fn(arg);
+ return work_on_cpu(cpu, fn, arg);
+}
+
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force, bool direct)
{
int ret = 0;
unsigned int i;
@@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
arg.pr = pr;
arg.target_state = state;
arg.force = force;
- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
+ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
+ direct);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
@@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
arg.pr = match_pr;
arg.target_state = state;
arg.force = force;
- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
- &arg);
+ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg, direct);
}
}
/*
@@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return ret;
}
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
+ bool force)
+{
+ return __acpi_processor_set_throttling(pr, state, force, false);
+}
+
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
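
The hunks above split T-state updates into a deferred path (dispatched with work_on_cpu()) and a direct path for callers that already execute on the target CPU: as the hunks set it up, the reset in acpi_processor_get_throttling_ptc() now passes direct=true because it is itself reached through work_on_cpu(). A minimal sketch of that dispatch, reusing the argument struct and worker already in this file; set_tstate() is a hypothetical condensed wrapper, not part of the patch:

static int set_tstate(struct acpi_processor *pr, int state, bool force,
		      bool direct)
{
	struct acpi_processor_throttling_arg arg = {
		.pr = pr,
		.target_state = state,
		.force = force,
	};

	/*
	 * Caller already runs on pr->id (e.g. inside a work_on_cpu()
	 * worker): apply the update inline instead of queueing nested work.
	 */
	if (direct)
		return acpi_processor_throttling_fn(&arg);

	/* Otherwise hop to the CPU that owns the throttling registers. */
	return work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
}
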
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 2fa8304171e0..7a3431018e0a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
device->driver_data = hc;
acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
- printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
- hc->ec, hc->offset, hc->query_bit);
+ dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
+ hc->offset, hc->query_bit);
return 0;
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b48ecbfc4498..8c5503c0bad7 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -206,6 +206,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
+ .callback = video_detect_force_video,
+ .ident = "SAMSUNG 670Z5E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
.callback = video_detect_force_video,
.ident = "SAMSUNG 730U3E/740U3E",
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f0099360039e..1accc01fb0ca 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -68,11 +68,12 @@ static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
+ ssize_t len;
- if (!dev->driver_override)
- return 0;
-
- return sprintf(buf, "%s\n", dev->driver_override);
+ device_lock(_dev);
+ len = sprintf(buf, "%s\n", dev->driver_override);
+ device_unlock(_dev);
+ return len;
}
static ssize_t driver_override_store(struct device *_dev,
@@ -80,9 +81,10 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old = dev->driver_override, *cp;
+ char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -93,12 +95,15 @@ static ssize_t driver_override_store(struct device *_dev,
if (cp)
*cp = '\0';
+ device_lock(_dev);
+ old = dev->driver_override;
if (strlen(driver_override)) {
dev->driver_override = driver_override;
} else {
kfree(driver_override);
dev->driver_override = NULL;
}
+ device_unlock(_dev);
kfree(old);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 063e0df75121..2299c661b324 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2148,8 +2148,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
&target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
- WARN(1, "Unexpected reply error: %u\n",
- target_thread->reply_error.cmd);
+ /*
+ * Cannot get here for normal operation, but
+ * we can if multiple synchronous transactions
+ * are sent without blocking for responses.
+ * Just ignore the 2nd error in this case.
+ */
+ pr_warn("Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
binder_inner_proc_unlock(target_thread->proc);
binder_thread_dec_tmpref(target_thread);
@@ -2996,6 +3002,14 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
+ if (target_node && target_proc == proc) {
+ binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_invalid_target_handle;
+ }
}
if (!target_node) {
/*
@@ -4536,8 +4550,29 @@ static int binder_thread_release(struct binder_proc *proc,
if (t)
spin_lock(&t->lock);
}
+
+ /*
+ * If this thread used poll, make sure we remove the waitqueue
+ * from any epoll data structures holding it with POLLFREE.
+ * waitqueue_active() is safe to use here because we're holding
+ * the inner lock.
+ */
+ if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+ waitqueue_active(&thread->wait)) {
+ wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+ }
+
binder_inner_proc_unlock(thread->proc);
+ /*
+ * This is needed to avoid races between wake_up_poll() above and
+ * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
+ * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+ * lock, so we can be sure it's done after calling synchronize_rcu().
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ synchronize_rcu();
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(proc, &thread->todo);
@@ -4553,6 +4588,8 @@ static unsigned int binder_poll(struct file *filp,
bool wait_for_proc_work;
thread = binder_get_thread(proc);
+ if (!thread)
+ return POLLERR;
binder_inner_proc_lock(thread->proc);
thread->looper |= BINDER_LOOPER_STATE_POLL;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 60a15831c009..5a6a01135470 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -260,9 +260,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
@@ -285,9 +285,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -296,20 +296,20 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */
{ PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
@@ -350,21 +350,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
@@ -382,6 +382,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -533,7 +538,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
- { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
.driver_data = board_ahci_yes_fbs },
/* Promise */
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b9081c..cd2eab6aa92e 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(dev, "no irq\n");
- return -EINVAL;
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "no irq\n");
+ return irq;
}
hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 69ec1c5d7152..60d6db82ce5a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,6 +4224,28 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* Crucial BX100 SSD 500GB has broken LPM support */
+ { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+
+ /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
+ { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+ /* 512GB MX100 with newer firmware has only LPM issues */
+ { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
+ /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
+ { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+ { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
+ /* Sandisk devices which are known to not handle LPM well */
+ { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4235,7 +4257,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -5077,8 +5101,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
* We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
- if (WARN_ON_ONCE(ata_is_data(prot) &&
- (!qc->sg || !qc->n_elem || !qc->nbytes)))
+ if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
goto sys_err;
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5b2aee83d776..4a267347a6d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3472,7 +3472,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
/* relay SCSI command to ATAPI device */
int len = COMMAND_SIZE(scsi_op);
- if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
+ if (unlikely(len > scmd->cmd_len ||
+ len > dev->cdb_len ||
+ scmd->cmd_len > ATAPI_CDB_LEN))
goto bad_cdb_len;
xlat_func = atapi_xlat;
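
The __ata_scsi_queuecmd() change adds a third bound so a relayed CDB can never exceed ATAPI_CDB_LEN on top of the existing caller and device limits. A stand-alone sketch of the same triple check (the helper name is hypothetical; the constant mirrors the kernel's 16-byte ATAPI maximum):

/* Hypothetical stand-alone version of the CDB length validation added
 * above: reject the command unless it fits the sender's buffer, the
 * device's advertised CDB size, and the fixed ATAPI maximum.
 */
#include <stdbool.h>

#define ATAPI_CDB_LEN 16

static bool atapi_cdb_len_ok(unsigned int opcode_len,  /* COMMAND_SIZE(op) */
                             unsigned int cmd_len,     /* scmd->cmd_len    */
                             unsigned int dev_cdb_len) /* dev->cdb_len     */
{
    if (opcode_len > cmd_len || opcode_len > dev_cdb_len)
        return false;
    if (cmd_len > ATAPI_CDB_LEN)
        return false;
    return true;
}
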
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index cecfb943762f..6eab52b92e01 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -23,6 +23,7 @@
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <asm/byteorder.h>
#include <asm/string.h>
#include <asm/io.h>
@@ -1456,6 +1457,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
spin_lock_irqsave(&zatm_dev->lock, flags);
info = zatm_dev->pool_info[pool];
if (cmd == ZATM_GETPOOLZ) {
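
array_index_nospec() clamps the pool index after the range check so a mispredicted bounds branch cannot be used to read pool_info[] speculatively out of range (Spectre v1). The user-space sketch below only approximates the masking idea; the real helper in <linux/nospec.h> adds compiler and speculation barriers, so treat it as an illustration, not a substitute:

/* Branch-free clamp: the mask is all-ones when index < size and zero
 * otherwise, so an out-of-range index collapses to 0 even under
 * speculation. Simplified relative to the kernel helper.
 */
#include <stddef.h>

static size_t index_nospec_sketch(size_t index, size_t size)
{
    size_t mask = (size_t)0 - (size_t)(index < size);

    return index & mask;
}

/* Usage mirroring the zatm ioctl above:
 *   if (pool < 0 || pool > ZATM_LAST_POOL)
 *           return -EINVAL;
 *   pool = index_nospec_sketch(pool, ZATM_LAST_POOL + 1);
 */
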
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 576b5facdf43..b3a62e94d1f3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1582,7 +1582,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (map->max_raw_write && map->max_raw_write > val_len)
+ if (map->max_raw_write && map->max_raw_write < val_len)
return -E2BIG;
map->lock(map->lock_arg);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1c36de9719e5..e8165ec55e6f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -263,7 +263,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+ iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos);
@@ -623,6 +623,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
*/
static int loop_flush(struct loop_device *lo)
{
+ /* loop not yet configured, no running thread, nothing to flush */
+ if (lo->lo_state != Lo_bound)
+ return 0;
return loop_switch(lo, NULL);
}
@@ -1118,11 +1121,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
+ if (type >= MAX_LO_CRYPT) {
+ err = -EINVAL;
+ goto exit;
+ }
xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
+ if (xfer == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
} else
xfer = NULL;
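
In loop_set_status() the two early returns become goto exit, so every failure path leaves through the function's shared unwind instead of bailing out with setup half done. A generic, hedged sketch of that single-exit error-handling pattern (all names invented for the example):

/* Generic single-exit ("goto out") error handling: every failure path
 * funnels through one label so shared cleanup is never skipped.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static int configure(unsigned int type, unsigned int max_type)
{
    void *buf = NULL;
    int err = 0;

    pthread_mutex_lock(&cfg_lock);

    if (type >= max_type) {
        err = -EINVAL;
        goto out;            /* not "return": the unlock below must run */
    }

    buf = malloc(4096);
    if (!buf) {
        err = -ENOMEM;
        goto out;
    }

    /* ... apply the configuration using buf ... */

out:
    free(buf);               /* free(NULL) is a no-op */
    pthread_mutex_unlock(&cfg_lock);
    return err;
}
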
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f018318d4466..440b52c458fe 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2779,7 +2779,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
- goto out_new_dev;
+ goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
@@ -2797,8 +2797,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
mutex_unlock(&ctl_mutex);
return 0;
-out_new_dev:
- blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 7b624423a7e8..89ccb604045c 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -31,6 +31,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
@@ -291,6 +292,14 @@ static int btsdio_probe(struct sdio_func *func,
tuple = tuple->next;
}
+ /* BCM43341 devices soldered onto the PCB (non-removable) use a
+ * UART connection for Bluetooth, so ignore the BT SDIO interface.
+ */
+ if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
+ func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
+ !mmc_card_is_removable(func->card->host))
+ return -ENODEV;
+
data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1ccad79ce77c..54cef3dc0beb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb/quirks.h>
#include <linux/firmware.h>
#include <asm/unaligned.h>
@@ -360,8 +361,8 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_FIRMWARE_LOADED 7
#define BTUSB_FIRMWARE_FAILED 8
#define BTUSB_BOOTING 9
-#define BTUSB_RESET_RESUME 10
-#define BTUSB_DIAG_RUNNING 11
+#define BTUSB_DIAG_RUNNING 10
+#define BTUSB_OOB_WAKE_ENABLED 11
struct btusb_data {
struct hci_dev *hdev;
@@ -2972,9 +2973,9 @@ static int btusb_probe(struct usb_interface *intf,
/* QCA Rome devices lose their updated firmware over suspend,
* but the USB hub doesn't notice any status change.
- * Explicitly request a device reset on resume.
+ * explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -2985,7 +2986,7 @@ static int btusb_probe(struct usb_interface *intf,
* but the USB hub doesn't notice any status change.
* Explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#endif
@@ -3142,14 +3143,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
- /* Optionally request a device reset on resume, but only when
- * wakeups are disabled. If wakeups are enabled we assume the
- * device will stay powered up throughout suspend.
- */
- if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
- !device_may_wakeup(&data->udev->dev))
- data->udev->reset_resume = 1;
-
return 0;
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 71325e443e46..8a3bf0a8c31d 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -936,6 +936,9 @@ static int qca_setup(struct hci_uart *hu)
if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
+ } else if (ret == -ENOENT) {
+ /* No patch/nvm-config found, run with original fw/config */
+ ret = 0;
}
/* Setup bdaddr */
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index f364fa4d24eb..f59183018280 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,8 +33,6 @@
#define ARB_ERR_CAP_CLEAR (1 << 0)
#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
-#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
-#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
@@ -43,7 +41,6 @@ enum {
ARB_ERR_CAP_CLR,
ARB_ERR_CAP_HI_ADDR,
ARB_ERR_CAP_ADDR,
- ARB_ERR_CAP_DATA,
ARB_ERR_CAP_STATUS,
ARB_ERR_CAP_MASTER,
};
@@ -53,7 +50,6 @@ static const int gisb_offsets_bcm7038[] = {
[ARB_ERR_CAP_CLR] = 0x0c4,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0c8,
- [ARB_ERR_CAP_DATA] = 0x0cc,
[ARB_ERR_CAP_STATUS] = 0x0d0,
[ARB_ERR_CAP_MASTER] = -1,
};
@@ -63,7 +59,6 @@ static const int gisb_offsets_bcm7400[] = {
[ARB_ERR_CAP_CLR] = 0x0c8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0cc,
- [ARB_ERR_CAP_DATA] = 0x0d0,
[ARB_ERR_CAP_STATUS] = 0x0d4,
[ARB_ERR_CAP_MASTER] = 0x0d8,
};
@@ -73,7 +68,6 @@ static const int gisb_offsets_bcm7435[] = {
[ARB_ERR_CAP_CLR] = 0x168,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x16c,
- [ARB_ERR_CAP_DATA] = 0x170,
[ARB_ERR_CAP_STATUS] = 0x174,
[ARB_ERR_CAP_MASTER] = 0x178,
};
@@ -83,7 +77,6 @@ static const int gisb_offsets_bcm7445[] = {
[ARB_ERR_CAP_CLR] = 0x7e4,
[ARB_ERR_CAP_HI_ADDR] = 0x7e8,
[ARB_ERR_CAP_ADDR] = 0x7ec,
- [ARB_ERR_CAP_DATA] = 0x7f0,
[ARB_ERR_CAP_STATUS] = 0x7f4,
[ARB_ERR_CAP_MASTER] = 0x7f8,
};
@@ -105,9 +98,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
{
int offset = gdev->gisb_offsets[reg];
- /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
- if (offset == -1)
- return 1;
+ if (offset < 0) {
+ /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+ if (reg == ARB_ERR_CAP_MASTER)
+ return 1;
+ else
+ return 0;
+ }
if (gdev->big_endian)
return ioread32be(gdev->base + offset);
@@ -115,6 +112,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
return ioread32(gdev->base + offset);
}
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
{
int offset = gdev->gisb_offsets[reg];
@@ -123,9 +130,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
return;
if (gdev->big_endian)
- iowrite32be(val, gdev->base + reg);
+ iowrite32be(val, gdev->base + offset);
else
- iowrite32(val, gdev->base + reg);
+ iowrite32(val, gdev->base + offset);
}
static ssize_t gisb_arb_get_timeout(struct device *dev,
@@ -181,7 +188,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
const char *reason)
{
u32 cap_status;
- unsigned long arb_addr;
+ u64 arb_addr;
u32 master;
const char *m_name;
char m_fmt[11];
@@ -193,10 +200,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
return 1;
/* Read the address and master */
- arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
-#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
- arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
-#endif
+ arb_addr = gisb_read_address(gdev);
master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
m_name = brcmstb_gisb_master_to_str(gdev, master);
@@ -205,7 +209,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
m_name = m_fmt;
}
- pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
+ pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
__func__, reason, arb_addr,
cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
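
gisb_read_address() now assembles the 64-bit capture address from the low and high registers unconditionally (replacing the CONFIG_PHYS_ADDR_T_64BIT ifdef), and the report switches to a %llx format to match the wider type. The widening itself is the usual lo/hi combination, shown stand-alone below as a small self-contained illustration rather than driver code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit capture registers into one 64-bit address: the low
 * word must be widened before the high word is shifted in.
 */
static uint64_t combine_capture_addr(uint32_t lo, uint32_t hi)
{
    return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
    uint64_t addr = combine_capture_addr(0x0c800000u, 0x1u);

    printf("capture address: 0x%" PRIx64 "\n", addr); /* 0x10c800000 */
    return 0;
}
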
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index c206ccda899b..b5f245d2875c 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
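
The cdrom_ioctl_media_changed() fix drops the (unsigned int) cast so the full-width ioctl argument is compared against the capacity; with the cast, a value such as 0x100000001 truncates to 1 and slips past the bounds check. A minimal demonstration of the difference (illustrative names only, meaningful on a 64-bit build):

#include <stdio.h>

static int rejected_truncated(unsigned long arg, unsigned int capacity)
{
    return (unsigned int)arg >= capacity;  /* buggy: truncates arg first */
}

static int rejected_full(unsigned long arg, unsigned int capacity)
{
    return arg >= capacity;                /* fixed: full-width compare */
}

int main(void)
{
    unsigned long arg = 0x100000001UL;     /* looks huge, truncates to 1 */

    printf("truncated check rejects: %d, full check rejects: %d\n",
           rejected_truncated(arg, 4), rejected_full(arg, 4));
    return 0;
}
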
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 76594144d73e..c0787608af56 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1188,7 +1188,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
if (err)
goto bail;
}
- if (ctx->buf->virt && metalen <= copylen)
+ VERIFY(err, ctx->buf->virt != NULL);
+ if (err)
+ goto bail;
+ if (metalen <= copylen)
memset(ctx->buf->virt, 0, metalen);
/* copy metadata */
@@ -1250,7 +1253,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
/* copy non ion buffers */
PERF(ctx->fl->profile, ctx->fl->perf.copy,
rlen = copylen - metalen;
- for (oix = 0; oix < inbufs + outbufs; ++oix) {
+ for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
size_t mlen;
@@ -1301,7 +1304,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
if (map && (map->attr & FASTRPC_ATTR_COHERENT))
continue;
- if (rpra[i].buf.len && ctx->overps[oix]->mstart) {
+ if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
if (map && map->handle)
msm_ion_do_cache_op(ctx->fl->apps->client,
map->handle,
@@ -1317,7 +1320,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
PERF_END);
inh = inbufs + outbufs;
- for (i = 0; i < REMOTE_SCALARS_INHANDLES(sc); i++) {
+ for (i = 0; rpra && i < REMOTE_SCALARS_INHANDLES(sc); i++) {
rpra[inh + i].buf.pv = ptr_to_uint64(ctx->lpra[inh + i].buf.pv);
rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
rpra[inh + i].h = ctx->lpra[inh + i].h;
@@ -2771,6 +2774,28 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
if (err)
goto bail;
break;
+ case FASTRPC_IOCTL_MMAP_64:
+ K_COPY_FROM_USER(err, 0, &p.mmap, param,
+ sizeof(p.mmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+ if (err)
+ goto bail;
+ K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+ if (err)
+ goto bail;
+ break;
+ case FASTRPC_IOCTL_MUNMAP_64:
+ K_COPY_FROM_USER(err, 0, &p.munmap, param,
+ sizeof(p.munmap));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
+ &p.munmap)));
+ if (err)
+ goto bail;
+ break;
case FASTRPC_IOCTL_SETMODE:
switch ((uint32_t)ioctl_param) {
case FASTRPC_MODE_PARALLEL:
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index fc6450336061..e1e061748f22 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,11 @@
_IOWR('R', 9, struct compat_fastrpc_ioctl_perf)
#define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
_IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
+#define COMPAT_FASTRPC_IOCTL_MMAP_64 \
+ _IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64)
+#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \
+ _IOWR('R', 15, struct compat_fastrpc_ioctl_munmap_64)
+
struct compat_remote_buf {
compat_uptr_t pv; /* buffer pointer */
@@ -72,11 +77,24 @@ struct compat_fastrpc_ioctl_mmap {
compat_uptr_t vaddrout; /* dsps virtual address */
};
+struct compat_fastrpc_ioctl_mmap_64 {
+ compat_int_t fd; /* ion fd */
+ compat_uint_t flags; /* flags for dsp to map with */
+ compat_u64 vaddrin; /* optional virtual address */
+ compat_size_t size; /* size */
+ compat_u64 vaddrout; /* dsps virtual address */
+};
+
struct compat_fastrpc_ioctl_munmap {
compat_uptr_t vaddrout; /* address to unmap */
compat_size_t size; /* size */
};
+struct compat_fastrpc_ioctl_munmap_64 {
+ compat_u64 vaddrout; /* address to unmap */
+ compat_size_t size; /* size */
+};
+
struct compat_fastrpc_ioctl_init {
compat_uint_t flags; /* one of FASTRPC_INIT_* macros */
compat_uptr_t file; /* pointer to elf file */
@@ -209,6 +227,28 @@ static int compat_get_fastrpc_ioctl_mmap(
return err;
}
+static int compat_get_fastrpc_ioctl_mmap_64(
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_uint_t u;
+ compat_int_t i;
+ compat_size_t s;
+ compat_u64 p;
+ int err;
+
+ err = get_user(i, &map32->fd);
+ err |= put_user(i, &map->fd);
+ err |= get_user(u, &map32->flags);
+ err |= put_user(u, &map->flags);
+ err |= get_user(p, &map32->vaddrin);
+ err |= put_user(p, &map->vaddrin);
+ err |= get_user(s, &map32->size);
+ err |= put_user(s, &map->size);
+
+ return err;
+}
+
static int compat_put_fastrpc_ioctl_mmap(
struct compat_fastrpc_ioctl_mmap __user *map32,
struct fastrpc_ioctl_mmap __user *map)
@@ -222,6 +262,19 @@ static int compat_put_fastrpc_ioctl_mmap(
return err;
}
+static int compat_put_fastrpc_ioctl_mmap_64(
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32,
+ struct fastrpc_ioctl_mmap __user *map)
+{
+ compat_u64 p;
+ int err;
+
+ err = get_user(p, &map->vaddrout);
+ err |= put_user(p, &map32->vaddrout);
+
+ return err;
+}
+
static int compat_get_fastrpc_ioctl_munmap(
struct compat_fastrpc_ioctl_munmap __user *unmap32,
struct fastrpc_ioctl_munmap __user *unmap)
@@ -238,6 +291,22 @@ static int compat_get_fastrpc_ioctl_munmap(
return err;
}
+static int compat_get_fastrpc_ioctl_munmap_64(
+ struct compat_fastrpc_ioctl_munmap_64 __user *unmap32,
+ struct fastrpc_ioctl_munmap __user *unmap)
+{
+ compat_u64 p;
+ compat_size_t s;
+ int err;
+
+ err = get_user(p, &unmap32->vaddrout);
+ err |= put_user(p, &unmap->vaddrout);
+ err |= get_user(s, &unmap32->size);
+ err |= put_user(s, &unmap->size);
+
+ return err;
+}
+
static int compat_get_fastrpc_ioctl_perf(
struct compat_fastrpc_ioctl_perf __user *perf32,
struct fastrpc_ioctl_perf __user *perf)
@@ -343,6 +412,27 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map));
return err;
}
+ case COMPAT_FASTRPC_IOCTL_MMAP_64:
+ {
+ struct compat_fastrpc_ioctl_mmap_64 __user *map32;
+ struct fastrpc_ioctl_mmap __user *map;
+ long ret;
+
+ map32 = compat_ptr(arg);
+ VERIFY(err, NULL != (map = compat_alloc_user_space(
+ sizeof(*map))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map));
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP_64,
+ (unsigned long)map);
+ if (ret)
+ return ret;
+ VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map));
+ return err;
+ }
case COMPAT_FASTRPC_IOCTL_MUNMAP:
{
struct compat_fastrpc_ioctl_munmap __user *unmap32;
@@ -360,6 +450,23 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP,
(unsigned long)unmap);
}
+ case COMPAT_FASTRPC_IOCTL_MUNMAP_64:
+ {
+ struct compat_fastrpc_ioctl_munmap_64 __user *unmap32;
+ struct fastrpc_ioctl_munmap __user *unmap;
+
+ unmap32 = compat_ptr(arg);
+ VERIFY(err, NULL != (unmap = compat_alloc_user_space(
+ sizeof(*unmap))));
+ if (err)
+ return -EFAULT;
+ VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32,
+ unmap));
+ if (err)
+ return err;
+ return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP_64,
+ (unsigned long)unmap);
+ }
case COMPAT_FASTRPC_IOCTL_INIT:
/* fall through */
case COMPAT_FASTRPC_IOCTL_INIT_ATTRS:
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index be8d1a536d6c..a88c668440c7 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,8 @@
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke)
#define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap)
#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 3, struct fastrpc_ioctl_munmap)
+#define FASTRPC_IOCTL_MMAP_64 _IOWR('R', 14, struct fastrpc_ioctl_mmap_64)
+#define FASTRPC_IOCTL_MUNMAP_64 _IOWR('R', 15, struct fastrpc_ioctl_munmap_64)
#define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd)
#define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t)
#define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init)
@@ -171,6 +173,11 @@ struct fastrpc_ioctl_munmap {
size_t size; /* size */
};
+struct fastrpc_ioctl_munmap_64 {
+ uint64_t vaddrout; /* address to unmap */
+ size_t size; /* size */
+};
+
struct fastrpc_ioctl_mmap {
int fd; /* ion fd */
uint32_t flags; /* flags for dsp to map with */
@@ -179,6 +186,15 @@ struct fastrpc_ioctl_mmap {
uintptr_t vaddrout; /* dsps virtual address */
};
+
+struct fastrpc_ioctl_mmap_64 {
+ int fd; /* ion fd */
+ uint32_t flags; /* flags for dsp to map with */
+ uint64_t vaddrin; /* optional virtual address */
+ size_t size; /* size */
+ uint64_t vaddrout; /* dsps virtual address */
+};
+
struct fastrpc_ioctl_perf { /* kernel performance data */
uintptr_t data;
uint32_t numkeys;
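
The new *_64 ioctl structures carry addresses as fixed-width uint64_t instead of uintptr_t, so 32-bit userspace and a 64-bit kernel agree on the field widths and the compat layer only has to copy values across. The sketch below uses a simplified, hypothetical pair of structs (the kernel's real definitions also keep a size_t size field); building it with -m32 and -m64 shows why the pointer-width layout needs compat translation at all:

#include <stdint.h>
#include <stdio.h>

struct mmap_ptr_width {      /* layout depends on the ABI */
    int fd;
    uint32_t flags;
    uintptr_t vaddrin;
    uintptr_t vaddrout;
};

struct mmap_fixed_width {    /* same field widths on every ABI */
    int fd;
    uint32_t flags;
    uint64_t vaddrin;
    uint64_t vaddrout;
};

int main(void)
{
    printf("pointer-width struct: %zu bytes, fixed-width struct: %zu bytes\n",
           sizeof(struct mmap_ptr_width), sizeof(struct mmap_fixed_width));
    return 0;
}
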
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 1341a94cc779..76afc841232c 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -859,6 +859,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
}
}
wmb();
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 6b8b00809cd3..b0b36d00415d 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -26,6 +26,7 @@
#include <linux/reboot.h>
#include <asm/current.h>
#include <soc/qcom/restart.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_DIAG_OVER_USB
#include <linux/usb/usbdiag.h>
#endif
@@ -258,7 +259,7 @@ static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
switch (type) {
case DCI_BUF_PRIMARY:
buffer->capacity = IN_BUF_SIZE;
- buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+ buffer->data = vzalloc(buffer->capacity);
if (!buffer->data)
return -ENOMEM;
break;
@@ -268,7 +269,7 @@ static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
break;
case DCI_BUF_CMD:
buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
- buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+ buffer->data = vzalloc(buffer->capacity);
if (!buffer->data)
return -ENOMEM;
break;
@@ -688,7 +689,7 @@ int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
byte_mask = 0x01 << (item_num % 8);
offset = equip_id * 514;
- if (offset + byte_index > DCI_LOG_MASK_SIZE) {
+ if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
__func__, offset, log_code, byte_index);
return 0;
@@ -715,7 +716,7 @@ int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
bit_index = event_id % 8;
byte_mask = 0x1 << bit_index;
- if (byte_index > DCI_EVENT_MASK_SIZE) {
+ if (byte_index >= DCI_EVENT_MASK_SIZE) {
pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
__func__, event_id, byte_index);
return 0;
@@ -863,7 +864,7 @@ static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
read_len += sizeof(struct diag_ctrl_dci_status);
for (i = 0; i < header->count; i++) {
- if (read_len > len) {
+ if (read_len > (len - 2)) {
pr_err("diag: In %s, Invalid length len: %d\n",
__func__, len);
return;
@@ -2719,7 +2720,7 @@ static int diag_dci_init_remote(void)
create_dci_event_mask_tbl(temp->event_mask_composite);
}
- partial_pkt.data = kzalloc(MAX_DCI_PACKET_SZ, GFP_KERNEL);
+ partial_pkt.data = vzalloc(MAX_DCI_PACKET_SZ);
if (!partial_pkt.data) {
pr_err("diag: Unable to create partial pkt data\n");
return -ENOMEM;
@@ -2775,7 +2776,7 @@ int diag_dci_init(void)
goto err;
if (driver->apps_dci_buf == NULL) {
- driver->apps_dci_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+ driver->apps_dci_buf = vzalloc(DCI_BUF_SIZE);
if (driver->apps_dci_buf == NULL)
goto err;
}
@@ -2792,12 +2793,12 @@ int diag_dci_init(void)
return DIAG_DCI_NO_ERROR;
err:
pr_err("diag: Could not initialize diag DCI buffers");
- kfree(driver->apps_dci_buf);
+ vfree(driver->apps_dci_buf);
driver->apps_dci_buf = NULL;
if (driver->diag_dci_wq)
destroy_workqueue(driver->diag_dci_wq);
- kfree(partial_pkt.data);
+ vfree(partial_pkt.data);
partial_pkt.data = NULL;
mutex_destroy(&driver->dci_mutex);
mutex_destroy(&dci_log_mask_mutex);
@@ -2817,9 +2818,9 @@ void diag_dci_channel_init(void)
void diag_dci_exit(void)
{
- kfree(partial_pkt.data);
+ vfree(partial_pkt.data);
partial_pkt.data = NULL;
- kfree(driver->apps_dci_buf);
+ vfree(driver->apps_dci_buf);
driver->apps_dci_buf = NULL;
mutex_destroy(&driver->dci_mutex);
mutex_destroy(&dci_log_mask_mutex);
@@ -2959,7 +2960,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
new_entry->in_service = 0;
INIT_LIST_HEAD(&new_entry->list_write_buf);
mutex_init(&new_entry->write_buf_mutex);
- new_entry->dci_log_mask = kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
+ new_entry->dci_log_mask = vzalloc(DCI_LOG_MASK_SIZE);
if (!new_entry->dci_log_mask) {
pr_err("diag: Unable to create log mask for client, %d",
driver->dci_client_id);
@@ -2967,7 +2968,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
}
create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
- new_entry->dci_event_mask = kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
+ new_entry->dci_event_mask = vzalloc(DCI_EVENT_MASK_SIZE);
if (!new_entry->dci_event_mask) {
pr_err("diag: Unable to create event mask for client, %d",
driver->dci_client_id);
@@ -2977,7 +2978,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
new_entry->buffers = kzalloc(new_entry->num_buffers *
sizeof(struct diag_dci_buf_peripheral_t),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!new_entry->buffers) {
pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
__func__);
@@ -3001,7 +3002,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
if (!proc_buf->buf_primary)
goto fail_alloc;
proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!proc_buf->buf_cmd)
goto fail_alloc;
err = diag_dci_init_buffer(proc_buf->buf_primary,
@@ -3034,7 +3035,7 @@ fail_alloc:
if (proc_buf) {
mutex_destroy(&proc_buf->health_mutex);
if (proc_buf->buf_primary) {
- kfree(proc_buf->buf_primary->data);
+ vfree(proc_buf->buf_primary->data);
proc_buf->buf_primary->data = NULL;
mutex_destroy(
&proc_buf->buf_primary->data_mutex);
@@ -3042,7 +3043,7 @@ fail_alloc:
kfree(proc_buf->buf_primary);
proc_buf->buf_primary = NULL;
if (proc_buf->buf_cmd) {
- kfree(proc_buf->buf_cmd->data);
+ vfree(proc_buf->buf_cmd->data);
proc_buf->buf_cmd->data = NULL;
mutex_destroy(
&proc_buf->buf_cmd->data_mutex);
@@ -3051,9 +3052,9 @@ fail_alloc:
proc_buf->buf_cmd = NULL;
}
}
- kfree(new_entry->dci_event_mask);
+ vfree(new_entry->dci_event_mask);
new_entry->dci_event_mask = NULL;
- kfree(new_entry->dci_log_mask);
+ vfree(new_entry->dci_log_mask);
new_entry->dci_log_mask = NULL;
kfree(new_entry->buffers);
new_entry->buffers = NULL;
@@ -3088,7 +3089,7 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
* Clear the client's log and event masks, update the cumulative
* masks and send the masks to peripherals
*/
- kfree(entry->dci_log_mask);
+ vfree(entry->dci_log_mask);
entry->dci_log_mask = NULL;
diag_dci_invalidate_cumulative_log_mask(token);
if (token == DCI_LOCAL_PROC)
@@ -3097,7 +3098,7 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
if (ret != DIAG_DCI_NO_ERROR) {
return ret;
}
- kfree(entry->dci_event_mask);
+ vfree(entry->dci_event_mask);
entry->dci_event_mask = NULL;
diag_dci_invalidate_cumulative_event_mask(token);
if (token == DCI_LOCAL_PROC)
@@ -3161,12 +3162,12 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
}
mutex_lock(&proc_buf->buf_primary->data_mutex);
- kfree(proc_buf->buf_primary->data);
+ vfree(proc_buf->buf_primary->data);
proc_buf->buf_primary->data = NULL;
mutex_unlock(&proc_buf->buf_primary->data_mutex);
mutex_lock(&proc_buf->buf_cmd->data_mutex);
- kfree(proc_buf->buf_cmd->data);
+ vfree(proc_buf->buf_cmd->data);
proc_buf->buf_cmd->data = NULL;
mutex_unlock(&proc_buf->buf_cmd->data_mutex);
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index a752cdc675a3..aa45c2e7ec7b 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -37,6 +37,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
.ctx = 0,
.mempool = POOL_TYPE_MUX_APPS,
.num_tbl_entries = 0,
+ .md_info_inited = 0,
.tbl = NULL,
.ops = NULL,
},
@@ -46,6 +47,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
.ctx = 0,
.mempool = POOL_TYPE_MDM_MUX,
.num_tbl_entries = 0,
+ .md_info_inited = 0,
.tbl = NULL,
.ops = NULL,
},
@@ -54,6 +56,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
.ctx = 0,
.mempool = POOL_TYPE_MDM2_MUX,
.num_tbl_entries = 0,
+ .md_info_inited = 0,
.tbl = NULL,
.ops = NULL,
},
@@ -62,6 +65,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
.ctx = 0,
.mempool = POOL_TYPE_QSC_MUX,
.num_tbl_entries = 0,
+ .md_info_inited = 0,
.tbl = NULL,
.ops = NULL,
}
@@ -85,6 +89,8 @@ void diag_md_open_all()
for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
ch = &diag_md[i];
+ if (!ch->md_info_inited)
+ continue;
if (ch->ops && ch->ops->open)
ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
}
@@ -101,6 +107,8 @@ void diag_md_close_all()
for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
ch = &diag_md[i];
+ if (!ch->md_info_inited)
+ continue;
if (ch->ops && ch->ops->close)
ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
@@ -159,7 +167,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
mutex_unlock(&driver->md_session_lock);
ch = &diag_md[id];
- if (!ch)
+ if (!ch || !ch->md_info_inited)
return -EINVAL;
spin_lock_irqsave(&ch->lock, flags);
@@ -236,6 +244,8 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
ch = &diag_md[i];
+ if (!ch->md_info_inited)
+ continue;
for (j = 0; j < ch->num_tbl_entries && !err; j++) {
entry = &ch->tbl[j];
if (entry->len <= 0 || entry->buf == NULL)
@@ -358,6 +368,8 @@ int diag_md_close_peripheral(int id, uint8_t peripheral)
return -EINVAL;
ch = &diag_md[id];
+ if (!ch || !ch->md_info_inited)
+ return -EINVAL;
spin_lock_irqsave(&ch->lock, flags);
for (i = 0; i < ch->num_tbl_entries && !found; i++) {
@@ -385,12 +397,12 @@ int diag_md_close_peripheral(int id, uint8_t peripheral)
return 0;
}
-int diag_md_init()
+int diag_md_init(void)
{
int i, j;
struct diag_md_info *ch = NULL;
- for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
ch = &diag_md[i];
ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
ch->tbl = kzalloc(ch->num_tbl_entries *
@@ -405,6 +417,7 @@ int diag_md_init()
ch->tbl[j].ctx = 0;
}
spin_lock_init(&(ch->lock));
+ ch->md_info_inited = 1;
}
return 0;
@@ -414,12 +427,54 @@ fail:
return -ENOMEM;
}
-void diag_md_exit()
+int diag_md_mdm_init(void)
+{
+ int i, j;
+ struct diag_md_info *ch = NULL;
+
+ for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+ ch->tbl = kcalloc(ch->num_tbl_entries, sizeof(*ch->tbl),
+ GFP_KERNEL);
+ if (!ch->tbl)
+ goto fail;
+
+ for (j = 0; j < ch->num_tbl_entries; j++) {
+ ch->tbl[j].buf = NULL;
+ ch->tbl[j].len = 0;
+ ch->tbl[j].ctx = 0;
+ }
+ spin_lock_init(&(ch->lock));
+ ch->md_info_inited = 1;
+ }
+
+ return 0;
+
+fail:
+ diag_md_mdm_exit();
+ return -ENOMEM;
+}
+
+void diag_md_exit(void)
{
int i;
struct diag_md_info *ch = NULL;
- for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
+ ch = &diag_md[i];
+ kfree(ch->tbl);
+ ch->num_tbl_entries = 0;
+ ch->ops = NULL;
+ }
+}
+
+void diag_md_mdm_exit(void)
+{
+ int i;
+ struct diag_md_info *ch = NULL;
+
+ for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
ch = &diag_md[i];
kfree(ch->tbl);
ch->num_tbl_entries = 0;
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
index 35a1ee35a956..4d65dedfdb58 100644
--- a/drivers/char/diag/diag_memorydevice.h
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@ struct diag_md_info {
int ctx;
int mempool;
int num_tbl_entries;
+ int md_info_inited;
spinlock_t lock;
struct diag_buf_tbl_t *tbl;
struct diag_mux_ops *ops;
@@ -46,7 +47,9 @@ struct diag_md_info {
extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
int diag_md_init(void);
+int diag_md_mdm_init(void);
void diag_md_exit(void);
+void diag_md_mdm_exit(void);
void diag_md_open_all(void);
void diag_md_close_all(void);
int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 8cc803eef552..8d766e1ae583 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -239,6 +239,8 @@ int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
if (new_mask != DIAG_CON_NONE)
*req_mode = DIAG_MULTI_MODE;
+ if (new_mask == DIAG_CON_ALL)
+ *req_mode = DIAG_MEMORY_DEVICE_MODE;
break;
case DIAG_MEMORY_DEVICE_MODE:
new_mask = (*peripheral_mask) | diag_mux->mux_mask;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 75080f0d4c39..afea5f40bfee 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -580,6 +580,7 @@ struct diagchar_dev {
/* buffer for updating mask to peripherals */
unsigned char *buf_feature_mask_update;
uint8_t hdlc_disabled;
+ uint8_t p_hdlc_disabled[NUM_MD_SESSIONS];
struct mutex hdlc_disable_mutex;
struct mutex hdlc_recovery_mutex;
struct timer_list hdlc_reset_timer;
@@ -604,6 +605,7 @@ struct diagchar_dev {
struct work_struct diag_drain_work;
struct work_struct update_user_clients;
struct work_struct update_md_clients;
+ struct work_struct diag_hdlc_reset_work;
struct workqueue_struct *diag_cntl_wq;
uint8_t log_on_demand_support;
uint8_t *apps_req_buf;
@@ -683,5 +685,6 @@ void diag_record_stats(int type, int flag);
struct diag_md_session_t *diag_md_session_get_pid(int pid);
struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral);
+int diag_md_session_match_pid_peripheral(int pid, uint8_t peripheral);
#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 678e3a2b051c..6df597dfa750 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -252,18 +252,13 @@ static void diag_update_md_client_work_fn(struct work_struct *work)
void diag_drain_work_fn(struct work_struct *work)
{
- struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled = 0;
timer_in_progress = 0;
mutex_lock(&apps_data_mutex);
- mutex_lock(&driver->md_session_lock);
- session_info = diag_md_session_get_peripheral(APPS_DATA);
- if (session_info)
- hdlc_disabled = session_info->hdlc_disabled;
- else
- hdlc_disabled = driver->hdlc_disabled;
- mutex_unlock(&driver->md_session_lock);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
+ mutex_unlock(&driver->hdlc_disable_mutex);
if (!hdlc_disabled)
diag_drain_apps_data(&hdlc_data);
else
@@ -389,8 +384,8 @@ static int diagchar_open(struct inode *inode, struct file *file)
return -ENOMEM;
fail:
- mutex_unlock(&driver->diagchar_mutex);
driver->num_clients--;
+ mutex_unlock(&driver->diagchar_mutex);
pr_err_ratelimited("diag: Insufficient memory for new client");
return -ENOMEM;
}
@@ -1006,6 +1001,7 @@ static int diag_remote_init(void)
poolsize_mdm_dci_write);
diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
poolsize_qsc_usb);
+ diag_md_mdm_init();
driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
if (!driver->hdlc_encode_buf)
return -ENOMEM;
@@ -1029,7 +1025,6 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
int bridge_index = proc - 1;
- struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled = 0;
if (!buf)
@@ -1055,13 +1050,9 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
if (driver->hdlc_encode_buf_len != 0)
return -EAGAIN;
- mutex_lock(&driver->md_session_lock);
- session_info = diag_md_session_get_peripheral(APPS_DATA);
- if (session_info)
- hdlc_disabled = session_info->hdlc_disabled;
- else
- hdlc_disabled = driver->hdlc_disabled;
- mutex_unlock(&driver->md_session_lock);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
+ mutex_unlock(&driver->hdlc_disable_mutex);
if (hdlc_disabled) {
if (len < 4) {
pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
@@ -1483,6 +1474,43 @@ struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
return driver->md_session_map[peripheral];
}
+/*
+ * diag_md_session_match_pid_peripheral
+ *
+ * 1. Pass a valid PID and get all the peripherals in the logging session
+ * for that PID
+ * 2. Pass a valid peripheral and get the PID logging for that peripheral
+ *
+ */
+
+int diag_md_session_match_pid_peripheral(int pid,
+ uint8_t peripheral)
+{
+ int i, flag = 0;
+
+ if (pid <= 0 || peripheral >= NUM_MD_SESSIONS)
+ return -EINVAL;
+
+ if (!peripheral) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] &&
+ driver->md_session_map[i]->pid == pid) {
+ peripheral |= 1 << i;
+ flag = 1;
+ }
+ }
+ if (flag)
+ return peripheral;
+ }
+
+ if (!pid) {
+ if (driver->md_session_map[peripheral])
+ return driver->md_session_map[peripheral]->pid;
+ }
+
+ return -EINVAL;
+}
+
static int diag_md_peripheral_switch(int pid,
int peripheral_mask, int req_mode) {
int i, bit = 0;
@@ -1637,6 +1665,13 @@ static int diag_md_session_check(int curr_mode, int req_mode,
}
err = diag_md_session_create(DIAG_MD_PERIPHERAL,
param->peripheral_mask, DIAG_LOCAL_PROC);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if ((param->peripheral_mask > 0) &&
+ (param->peripheral_mask & (1 << i)))
+ driver->p_hdlc_disabled[i] = 0;
+ }
+ mutex_unlock(&driver->hdlc_disable_mutex);
}
*change_mode = 1;
return err;
@@ -1710,19 +1745,20 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
return -EINVAL;
}
+ i = upd - UPD_WLAN;
+
if (driver->md_session_map[peripheral] &&
(MD_PERIPHERAL_MASK(peripheral) &
- diag_mux->mux_mask)) {
+ diag_mux->mux_mask) &&
+ !driver->pd_session_clear[i]) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"diag_fr: User PD is already logging onto active peripheral logging\n");
- i = upd - UPD_WLAN;
driver->pd_session_clear[i] = 0;
return -EINVAL;
}
peripheral_mask =
diag_translate_mask(param->pd_mask);
param->peripheral_mask = peripheral_mask;
- i = upd - UPD_WLAN;
if (!driver->pd_session_clear[i]) {
driver->pd_logging_mode[i] = 1;
driver->num_pd_session += 1;
@@ -2084,11 +2120,14 @@ static int diag_ioctl_dci_support(unsigned long ioarg)
static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
{
- uint8_t hdlc_support;
+ uint8_t hdlc_support, i;
+ int peripheral = -EINVAL;
struct diag_md_session_t *session_info = NULL;
+
if (copy_from_user(&hdlc_support, (void __user *)ioarg,
sizeof(uint8_t)))
return -EFAULT;
+
mutex_lock(&driver->hdlc_disable_mutex);
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
@@ -2096,6 +2135,25 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
session_info->hdlc_disabled = hdlc_support;
else
driver->hdlc_disabled = hdlc_support;
+
+ peripheral =
+ diag_md_session_match_pid_peripheral(current->tgid,
+ 0);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral > 0 && session_info) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ session_info->hdlc_disabled;
+ else if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ } else {
+ if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ }
+
mutex_unlock(&driver->md_session_lock);
mutex_unlock(&driver->hdlc_disable_mutex);
diag_update_md_clients(HDLC_SUPPORT_TYPE);
@@ -2969,7 +3027,6 @@ static int diag_user_process_apps_data(const char __user *buf, int len,
int stm_size = 0;
const int mempool = POOL_TYPE_COPY;
unsigned char *user_space_data = NULL;
- struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled;
if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
@@ -3023,13 +3080,7 @@ static int diag_user_process_apps_data(const char __user *buf, int len,
mutex_lock(&apps_data_mutex);
mutex_lock(&driver->hdlc_disable_mutex);
- mutex_lock(&driver->md_session_lock);
- session_info = diag_md_session_get_peripheral(APPS_DATA);
- if (session_info)
- hdlc_disabled = session_info->hdlc_disabled;
- else
- hdlc_disabled = driver->hdlc_disabled;
- mutex_unlock(&driver->md_session_lock);
+ hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
if (hdlc_disabled)
ret = diag_process_apps_data_non_hdlc(user_space_data, len,
pkt_type);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f876489e202d..c7b46304dc84 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -49,6 +49,11 @@
#define STM_RSP_STATUS_INDEX 8
#define STM_RSP_NUM_BYTES 9
+struct diag_md_hdlc_reset_work {
+ int pid;
+ struct work_struct work;
+};
+
static int timestamp_switch;
module_param(timestamp_switch, int, 0644);
@@ -436,6 +441,7 @@ static void diag_send_rsp(unsigned char *buf, int len, int pid)
{
struct diag_md_session_t *session_info = NULL, *info = NULL;
uint8_t hdlc_disabled;
+
mutex_lock(&driver->md_session_lock);
info = diag_md_session_get_pid(pid);
session_info = (info) ? info :
@@ -445,6 +451,7 @@ static void diag_send_rsp(unsigned char *buf, int len, int pid)
else
hdlc_disabled = driver->hdlc_disabled;
mutex_unlock(&driver->md_session_lock);
+
if (hdlc_disabled)
pack_rsp_and_send(buf, len, pid);
else
@@ -948,7 +955,7 @@ void diag_send_error_rsp(unsigned char *buf, int len,
int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
{
int i, p_mask = 0;
- int mask_ret;
+ int mask_ret, peripheral = -EINVAL;
int write_len = 0;
unsigned char *temp = NULL;
struct diag_cmd_reg_entry_t entry;
@@ -1003,8 +1010,11 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
} else {
mutex_unlock(&driver->md_session_lock);
if (MD_PERIPHERAL_MASK(reg_item->proc) &
- driver->logging_mask)
+ driver->logging_mask) {
+ mutex_unlock(&driver->cmd_reg_mutex);
diag_send_error_rsp(buf, len, pid);
+ return write_len;
+ }
else
write_len = diag_send_data(reg_item, buf, len);
}
@@ -1172,6 +1182,22 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
info->hdlc_disabled = 1;
else
driver->hdlc_disabled = 1;
+ peripheral =
+ diag_md_session_match_pid_peripheral(pid, 0);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral > 0 && info) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ info->hdlc_disabled;
+ else if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ } else {
+ if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ }
mutex_unlock(&driver->md_session_lock);
diag_update_md_clients(HDLC_SUPPORT_TYPE);
mutex_unlock(&driver->hdlc_disable_mutex);
@@ -1347,8 +1373,17 @@ static int diagfwd_mux_close(int id, int mode)
pr_debug("diag: In %s, re-enabling HDLC encoding\n",
__func__);
mutex_lock(&driver->hdlc_disable_mutex);
- if (driver->md_session_mode == DIAG_MD_NONE)
+ if (driver->md_session_mode == DIAG_MD_NONE) {
driver->hdlc_disabled = 0;
+ /*
+ * HDLC encoding is re-enabled when diag is
+ * logically or physically disconnected from USB.
+ */
+ for (i = 0; i < NUM_MD_SESSIONS; i++)
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
mutex_unlock(&driver->hdlc_disable_mutex);
queue_work(driver->diag_wq,
&(driver->update_user_clients));
@@ -1363,6 +1398,7 @@ static uint8_t hdlc_reset;
static void hdlc_reset_timer_start(int pid)
{
struct diag_md_session_t *info = NULL;
+
mutex_lock(&driver->md_session_lock);
info = diag_md_session_get_pid(pid);
if (!hdlc_timer_in_progress) {
@@ -1377,30 +1413,99 @@ static void hdlc_reset_timer_start(int pid)
mutex_unlock(&driver->md_session_lock);
}
+/*
+ * diag_timer_work_fn
+ * Queued on the workqueue to protect the md_session_info structure.
+ *
+ * Updates hdlc_disabled for each peripheral
+ * that is not part of any md_session_info.
+ *
+ */
+static void diag_timer_work_fn(struct work_struct *work)
+{
+ int i = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ driver->hdlc_disabled = 0;
+ mutex_lock(&driver->md_session_lock);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ session_info = diag_md_session_get_peripheral(i);
+ if (!session_info)
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ mutex_unlock(&driver->md_session_lock);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+}
+
+/*
+ * diag_md_timer_work_fn
+ * Queued on the workqueue to protect the md_session_info structure.
+ *
+ * Updates hdlc_disabled for each peripheral
+ * that is part of an md_session_info.
+ *
+ */
+static void diag_md_timer_work_fn(struct work_struct *work)
+{
+ int peripheral = -EINVAL, i = 0;
+ struct diag_md_session_t *session_info = NULL;
+ struct diag_md_hdlc_reset_work *hdlc_work = container_of(work,
+ struct diag_md_hdlc_reset_work, work);
+
+ if (!hdlc_work)
+ return;
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&driver->md_session_lock);
+ session_info = diag_md_session_get_pid(hdlc_work->pid);
+ if (session_info)
+ session_info->hdlc_disabled = 0;
+ peripheral =
+ diag_md_session_match_pid_peripheral(hdlc_work->pid, 0);
+ if (peripheral > 0 && session_info) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ session_info->hdlc_disabled;
+ }
+ }
+ kfree(hdlc_work);
+ mutex_unlock(&driver->md_session_lock);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+}
+
static void hdlc_reset_timer_func(unsigned long data)
{
pr_debug("diag: In %s, re-enabling HDLC encoding\n",
__func__);
+
if (hdlc_reset) {
- driver->hdlc_disabled = 0;
- queue_work(driver->diag_wq,
- &(driver->update_user_clients));
+ queue_work(driver->diag_wq, &(driver->diag_hdlc_reset_work));
+ queue_work(driver->diag_wq, &(driver->update_user_clients));
}
hdlc_timer_in_progress = 0;
}
void diag_md_hdlc_reset_timer_func(unsigned long pid)
{
- struct diag_md_session_t *session_info = NULL;
+ struct diag_md_hdlc_reset_work *hdlc_reset_work = NULL;
pr_debug("diag: In %s, re-enabling HDLC encoding\n",
__func__);
+ hdlc_reset_work = kmalloc(sizeof(*hdlc_reset_work), GFP_ATOMIC);
+ if (!hdlc_reset_work) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Could not allocate hdlc_reset_work\n");
+ hdlc_timer_in_progress = 0;
+ return;
+ }
if (hdlc_reset) {
- session_info = diag_md_session_get_pid(pid);
- if (session_info)
- session_info->hdlc_disabled = 0;
- queue_work(driver->diag_wq,
- &(driver->update_md_clients));
+ hdlc_reset_work->pid = pid;
+ INIT_WORK(&hdlc_reset_work->work, diag_md_timer_work_fn);
+ queue_work(driver->diag_wq, &(hdlc_reset_work->work));
+ queue_work(driver->diag_wq, &(driver->update_md_clients));
}
hdlc_timer_in_progress = 0;
}
@@ -1408,7 +1513,7 @@ void diag_md_hdlc_reset_timer_func(unsigned long pid)
static void diag_hdlc_start_recovery(unsigned char *buf, int len,
int pid)
{
- int i;
+ int i, peripheral = -EINVAL;
static uint32_t bad_byte_counter;
unsigned char *start_ptr = NULL;
struct diag_pkt_frame_t *actual_pkt = NULL;
@@ -1440,6 +1545,24 @@ static void diag_hdlc_start_recovery(unsigned char *buf, int len,
info->hdlc_disabled = 0;
else
driver->hdlc_disabled = 0;
+
+ peripheral =
+ diag_md_session_match_pid_peripheral(pid, 0);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (peripheral > 0 && info) {
+ if (peripheral & (1 << i))
+ driver->p_hdlc_disabled[i] =
+ info->hdlc_disabled;
+ else if (
+ !diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ } else {
+ if (!diag_md_session_get_peripheral(i))
+ driver->p_hdlc_disabled[i] =
+ driver->hdlc_disabled;
+ }
+ }
mutex_unlock(&driver->md_session_lock);
mutex_unlock(&driver->hdlc_disable_mutex);
diag_update_md_clients(HDLC_SUPPORT_TYPE);
@@ -1697,6 +1820,8 @@ int diagfwd_init(void)
INIT_LIST_HEAD(&driver->cmd_reg_list);
driver->cmd_reg_count = 0;
mutex_init(&driver->cmd_reg_mutex);
+ INIT_WORK(&(driver->diag_hdlc_reset_work),
+ diag_timer_work_fn);
for (i = 0; i < NUM_PERIPHERALS; i++) {
driver->feature[i].separate_cmd_rsp = 0;
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index a7abe3dafb69..6b74c0056d1b 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -284,7 +284,6 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
int err = 0;
int write_len = 0, peripheral = 0;
unsigned char *write_buf = NULL;
- struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled = 0;
if (!fwd_info || !buf || len <= 0) {
@@ -316,13 +315,9 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
diag_ws_release();
return;
}
- mutex_lock(&driver->md_session_lock);
- session_info = diag_md_session_get_peripheral(peripheral);
- if (session_info)
- hdlc_disabled = session_info->hdlc_disabled;
- else
- hdlc_disabled = driver->hdlc_disabled;
- mutex_unlock(&driver->md_session_lock);
+
+ hdlc_disabled = driver->p_hdlc_disabled[peripheral];
+
if (hdlc_disabled) {
/* The data is raw and HDLC is disabled on the APPS side */
if (!buf) {
@@ -615,7 +610,6 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
int write_len = 0;
unsigned char *write_buf = NULL;
struct diagfwd_buf_t *temp_buf = NULL;
- struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled = 0;
if (!fwd_info || !buf || len <= 0) {
@@ -637,13 +631,9 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
mutex_lock(&driver->hdlc_disable_mutex);
mutex_lock(&fwd_info->data_mutex);
- mutex_lock(&driver->md_session_lock);
- session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
- if (session_info)
- hdlc_disabled = session_info->hdlc_disabled;
- else
- hdlc_disabled = driver->hdlc_disabled;
- mutex_unlock(&driver->md_session_lock);
+
+ hdlc_disabled = driver->p_hdlc_disabled[fwd_info->peripheral];
+
if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
temp_buf = fwd_info->buf_1;
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 7845a38b6604..7ba0ae060d61 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -155,8 +155,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_PM
-static int exynos_rng_runtime_suspend(struct device *dev)
+static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -166,7 +165,7 @@ static int exynos_rng_runtime_suspend(struct device *dev)
return 0;
}
-static int exynos_rng_runtime_resume(struct device *dev)
+static int __maybe_unused exynos_rng_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -174,12 +173,12 @@ static int exynos_rng_runtime_resume(struct device *dev)
return clk_prepare_enable(exynos_rng->clk);
}
-static int exynos_rng_suspend(struct device *dev)
+static int __maybe_unused exynos_rng_suspend(struct device *dev)
{
return pm_runtime_force_suspend(dev);
}
-static int exynos_rng_resume(struct device *dev)
+static int __maybe_unused exynos_rng_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -191,7 +190,6 @@ static int exynos_rng_resume(struct device *dev)
return exynos_rng_configure(exynos_rng);
}
-#endif
static const struct dev_pm_ops exynos_rng_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f53e8ba2c718..83c206f0fc98 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -409,6 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -431,6 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
msg = ipmi_alloc_smi_msg();
if (!msg) {
ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 40d400fe5bb7..4ada103945f0 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -515,7 +515,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
+ atomic_add(1, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -525,7 +525,7 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_recv_msg,
1);
if (rv)
- atomic_sub(2, &panic_done_count);
+ atomic_sub(1, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -549,12 +549,12 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
+ atomic_add(1, &panic_done_count);
rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
+ atomic_sub(1, &panic_done_count);
printk(KERN_WARNING PFX
"Unable to extend the watchdog timeout.");
} else {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1822472dffab..dffd06a3bb76 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -724,7 +724,7 @@ retry:
static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
- const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+ const int nbits_max = r->poolinfo->poolwords * 32;
if (nbits < 0)
return -EINVAL;
@@ -886,12 +886,16 @@ static void add_interrupt_bench(cycles_t start)
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
__u32 *ptr = (__u32 *) regs;
+ unsigned int idx;
if (regs == NULL)
return 0;
- if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
- f->reg_idx = 0;
- return *(ptr + f->reg_idx++);
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
+ return *ptr;
}
void add_interrupt_randomness(int irq, int irq_flags)
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 8d626784cd8d..49e4040eeb55 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -485,7 +485,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
{
int size = 0;
- int expected;
+ u32 expected;
if (!chip)
return -EBUSY;
@@ -502,7 +502,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
}
expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index aaa5fa95dede..36afc1a21699 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1040,6 +1040,11 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
break;
recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+ if (recd > num_bytes) {
+ total = -EFAULT;
+ break;
+ }
+
memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
dest += recd;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 286bd090a488..389a009b83f2 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -622,6 +622,11 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
if (!rc) {
data_len = be16_to_cpup(
(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ rc = -EFAULT;
+ goto out;
+ }
+
data = &buf.data[TPM_HEADER_SIZE + 6];
memcpy(payload->key, data, data_len - 1);
@@ -629,6 +634,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
payload->migratable = data[data_len - 1];
}
+out:
tpm_buf_destroy(&buf);
return rc;
}
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index f2aa99e34b4b..9f12ad74a09b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -436,7 +436,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -451,7 +452,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if ((size_t) expected > count) {
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index a1e1474dda30..aedf726cbab6 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -267,7 +267,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
- int expected, status, burst_count, retries, size = 0;
+ int status;
+ int burst_count;
+ int retries;
+ int size = 0;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
i2c_nuvoton_ready(chip); /* return to idle */
@@ -309,7 +313,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* to machine native
*/
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < size) {
dev_err(dev, "%s() expected > count\n", __func__);
size = -EIO;
continue;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7f13221aeb30..9dd93a209ef2 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -283,7 +283,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -298,7 +299,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index be0b09a0fb44..2aca689061e1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1399,7 +1399,6 @@ static int add_port(struct ports_device *portdev, u32 id)
{
char debugfs_name[16];
struct port *port;
- struct port_buffer *buf;
dev_t devt;
unsigned int nr_added_bufs;
int err;
@@ -1510,8 +1509,6 @@ static int add_port(struct ports_device *portdev, u32 id)
return 0;
free_inbufs:
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1536,34 +1533,14 @@ static void remove_port(struct kref *kref)
static void remove_port_data(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
spin_unlock_irq(&port->inbuf_lock);
- /* Remove buffers we queued up for the Host to send us data in. */
- do {
- spin_lock_irq(&port->inbuf_lock);
- buf = virtqueue_detach_unused_buf(port->in_vq);
- spin_unlock_irq(&port->inbuf_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
-
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
-
- /* Free pending buffers from the out-queue. */
- do {
- spin_lock_irq(&port->outvq_lock);
- buf = virtqueue_detach_unused_buf(port->out_vq);
- spin_unlock_irq(&port->outvq_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
}
/*
@@ -1788,13 +1765,24 @@ static void control_work_handler(struct work_struct *work)
spin_unlock(&portdev->c_ivq_lock);
}
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(vq, &len)))
+ free_buf(buf, can_sleep);
+}
+
static void out_intr(struct virtqueue *vq)
{
struct port *port;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
wake_up_interruptible(&port->waitqueue);
}
@@ -1805,8 +1793,10 @@ static void in_intr(struct virtqueue *vq)
unsigned long flags;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
spin_lock_irqsave(&port->inbuf_lock, flags);
port->inbuf = get_inbuf(port);
@@ -1981,6 +1971,15 @@ static const struct file_operations portdev_fops = {
static void remove_vqs(struct ports_device *portdev)
{
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq(portdev->vdev, vq) {
+ struct port_buffer *buf;
+
+ flush_bufs(vq, true);
+ while ((buf = virtqueue_detach_unused_buf(vq)))
+ free_buf(buf, true);
+ }
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 35ab89fe9d7b..d56ba46e6b78 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -891,9 +891,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
const struct bcm2835_pll_data *data = pll->data;
spin_lock(&cprman->regs_lock);
- cprman_write(cprman, data->cm_ctrl_reg,
- cprman_read(cprman, data->cm_ctrl_reg) |
- CM_PLL_ANARST);
+ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
cprman_write(cprman, data->a2w_ctrl_reg,
cprman_read(cprman, data->a2w_ctrl_reg) |
A2W_PLL_CTRL_PWRDN);
@@ -912,8 +910,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
~A2W_PLL_CTRL_PWRDN);
/* Take the PLL out of reset. */
+ spin_lock(&cprman->regs_lock);
cprman_write(cprman, data->cm_ctrl_reg,
cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+ spin_unlock(&cprman->regs_lock);
/* Wait for the PLL to lock. */
timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
@@ -927,6 +927,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
cpu_relax();
}
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PRST_DISABLE);
+
return 0;
}
@@ -997,9 +1001,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
}
/* Unmask the reference clock from the oscillator. */
+ spin_lock(&cprman->regs_lock);
cprman_write(cprman, A2W_XOSC_CTRL,
cprman_read(cprman, A2W_XOSC_CTRL) |
data->reference_enable_mask);
+ spin_unlock(&cprman->regs_lock);
if (do_ana_setup_first)
bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c
index a564e9248814..adc14145861a 100644
--- a/drivers/clk/bcm/clk-ns2.c
+++ b/drivers/clk/bcm/clk-ns2.c
@@ -103,7 +103,7 @@ CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr",
static const struct iproc_pll_ctrl genpll_sw = {
.flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
- .aon = AON_VAL(0x0, 2, 9, 8),
+ .aon = AON_VAL(0x0, 1, 11, 10),
.reset = RESET_VAL(0x4, 2, 1),
.dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3),
.ndiv_int = REG_VAL(0x8, 4, 10),
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 43a218f35b19..4ad32ce428cf 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -106,7 +106,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
rc = clk_set_rate(clk, rate);
if (rc < 0)
- pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n",
+ pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..c40445488d3a 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = {
};
/* find closest match to given frequency in OPP table */
-static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
+static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
{
int idx;
- u32 fmin = 0, fmax = ~0, ftmp;
+ unsigned long fmin = 0, fmax = ~0, ftmp;
const struct scpi_opp *opp = clk->info->opps;
for (idx = 0; idx < clk->info->count; idx++, opp++) {
ftmp = opp->freq;
- if (ftmp >= (u32)rate) {
+ if (ftmp >= rate) {
if (ftmp <= fmax)
fmax = ftmp;
break;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index e346b223199d..a01ee9a3ed6d 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = {
"xtal", "clkin"
};
static const char * const si5351_pll_names[] = {
- "plla", "pllb", "vxco"
+ "si5351_plla", "si5351_pllb", "si5351_vxco"
};
static const char * const si5351_msynth_names[] = {
"ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
diff --git a/drivers/clk/msm/clock-dummy.c b/drivers/clk/msm/clock-dummy.c
index e874fccc7f6c..e3a0181f1cc4 100644
--- a/drivers/clk/msm/clock-dummy.c
+++ b/drivers/clk/msm/clock-dummy.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011,2013-2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011,2013-2014,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,53 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <soc/qcom/msm-clock-controller.h>
+#include <linux/reset-controller.h>
+
+#define DUMMY_RESET_NR 20
+
+static int dummy_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
+static int dummy_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
+static int dummy_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return 0;
+}
+
+static struct reset_control_ops dummy_reset_ops = {
+ .reset = dummy_reset,
+ .assert = dummy_reset_assert,
+ .deassert = dummy_reset_deassert,
+};
+
+static int dummy_reset_controller_register(struct platform_device *pdev)
+{
+ struct reset_controller_dev *prcdev;
+ int ret = 0;
+
+ prcdev = devm_kzalloc(&pdev->dev, sizeof(*prcdev), GFP_KERNEL);
+ if (!prcdev)
+ return -ENOMEM;
+
+ prcdev->of_node = pdev->dev.of_node;
+ prcdev->ops = &dummy_reset_ops;
+ prcdev->owner = pdev->dev.driver->owner;
+ prcdev->nr_resets = DUMMY_RESET_NR;
+
+ ret = reset_controller_register(prcdev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to register reset controller\n");
+
+ return ret;
+}
static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
{
@@ -99,7 +146,11 @@ static int msm_clock_dummy_probe(struct platform_device *pdev)
ret = of_clk_add_provider(pdev->dev.of_node, of_dummy_get, NULL);
if (ret)
- return -ENOMEM;
+ return ret;
+
+ ret = dummy_reset_controller_register(pdev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "Registered DUMMY provider.\n");
return ret;
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index eb72217b9b1c..a1635bad3bb0 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2868,7 +2868,7 @@ static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf,
else if (c->trace_method == XOR_PACKET)
len = snprintf(debug_buf, sizeof(debug_buf), "xor\n");
- rc = simple_read_from_buffer((void __user *) buf, len, ppos,
+ rc = simple_read_from_buffer((void __user *) buf, count, ppos,
(void *) debug_buf, len);
mutex_unlock(&debug_buf_mutex);
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
index 5f779ec9bcc3..e37faceac088 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -403,8 +403,8 @@ static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
pdb->in.pll_ip_trim = 4; /* 4, reg: 0x0404 */
pdb->in.pll_cpcset_cur = 1; /* 1, reg: 0x04f0, bit 0 - 2 */
pdb->in.pll_cpmset_cur = 1; /* 1, reg: 0x04f0, bit 3 - 5 */
- pdb->in.pll_icpmset = 4; /* 4, reg: 0x04fc, bit 3 - 5 */
- pdb->in.pll_icpcset = 4; /* 4, reg: 0x04fc, bit 0 - 2 */
+ pdb->in.pll_icpmset = 7; /* 7, reg: 0x04fc, bit 3 - 5 */
+ pdb->in.pll_icpcset = 7; /* 7, reg: 0x04fc, bit 0 - 2 */
pdb->in.pll_icpmset_p = 0; /* 0, reg: 0x04f4, bit 0 - 2 */
pdb->in.pll_icpmset_m = 0; /* 0, reg: 0x04f4, bit 3 - 5 */
pdb->in.pll_icpcset_p = 0; /* 0, reg: 0x04f8, bit 0 - 2 */
diff --git a/drivers/clk/msm/virtclk-front-8996.c b/drivers/clk/msm/virtclk-front-8996.c
index 68ef5967df58..f2a70f240984 100644
--- a/drivers/clk/msm/virtclk-front-8996.c
+++ b/drivers/clk/msm/virtclk-front-8996.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <dt-bindings/clock/msm-clocks-8996.h>
+#include "virtclk-front.h"
#include "virt-reset-front.h"
static struct virtclk_front gcc_blsp1_ahb_clk = {
@@ -498,6 +499,33 @@ static struct virtclk_front gcc_mss_mnoc_bimc_axi_clk = {
},
};
+static struct virtclk_front ipa_clk = {
+ .c = {
+ .dbg_name = "ipa",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(ipa_clk.c),
+ },
+ .flag = CLOCK_FLAG_NODE_TYPE_REMOTE,
+};
+
+static struct virtclk_front pnoc_clk = {
+ .c = {
+ .dbg_name = "pnoc",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(pnoc_clk.c),
+ },
+ .flag = CLOCK_FLAG_NODE_TYPE_REMOTE,
+};
+
+static struct virtclk_front qdss_clk = {
+ .c = {
+ .dbg_name = "qdss",
+ .ops = &virtclk_front_ops,
+ CLK_INIT(qdss_clk.c),
+ },
+ .flag = CLOCK_FLAG_NODE_TYPE_REMOTE,
+};
+
static struct clk_lookup msm_clocks_8996[] = {
CLK_LIST(gcc_blsp1_ahb_clk),
CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
@@ -559,6 +587,9 @@ static struct clk_lookup msm_clocks_8996[] = {
CLK_LIST(gpll0_out_msscc),
CLK_LIST(gcc_mss_snoc_axi_clk),
CLK_LIST(gcc_mss_mnoc_bimc_axi_clk),
+ CLK_LIST(ipa_clk),
+ CLK_LIST(pnoc_clk),
+ CLK_LIST(qdss_clk),
};
static struct virt_reset_map msm_resets_8996[] = {
diff --git a/drivers/clk/msm/virtclk-front.c b/drivers/clk/msm/virtclk-front.c
index 4018c4922574..ad89dda6514f 100644
--- a/drivers/clk/msm/virtclk-front.c
+++ b/drivers/clk/msm/virtclk-front.c
@@ -62,7 +62,7 @@ static int virtclk_front_get_id(struct clk *clk)
if (v->id)
return ret;
- msg.header.cmd = CLK_MSG_GETID;
+ msg.header.cmd = CLK_MSG_GETID | v->flag;
msg.header.len = sizeof(msg);
strlcpy(msg.name, clk->dbg_name, sizeof(msg.name));
@@ -77,7 +77,7 @@ static int virtclk_front_get_id(struct clk *clk)
}
ret = habmm_socket_recv(handle, &rsp, &rsp_size,
- UINT_MAX, 0);
+ UINT_MAX, HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
if (ret) {
pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
ret);
@@ -119,7 +119,7 @@ static int virtclk_front_prepare(struct clk *clk)
return ret;
msg.clk_id = v->id;
- msg.cmd = CLK_MSG_ENABLE;
+ msg.cmd = CLK_MSG_ENABLE | v->flag;
msg.len = sizeof(struct clk_msg_header);
rt_mutex_lock(&virtclk_front_ctx.lock);
@@ -132,7 +132,8 @@ static int virtclk_front_prepare(struct clk *clk)
goto err_out;
}
- ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+ ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX,
+ HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
if (ret) {
pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
ret);
@@ -172,7 +173,7 @@ static void virtclk_front_unprepare(struct clk *clk)
return;
msg.clk_id = v->id;
- msg.cmd = CLK_MSG_DISABLE;
+ msg.cmd = CLK_MSG_DISABLE | v->flag;
msg.len = sizeof(struct clk_msg_header);
rt_mutex_lock(&virtclk_front_ctx.lock);
@@ -185,7 +186,8 @@ static void virtclk_front_unprepare(struct clk *clk)
goto err_out;
}
- ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+ ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX,
+ HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
if (ret) {
pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
ret);
@@ -222,7 +224,7 @@ static int virtclk_front_reset(struct clk *clk, enum clk_reset_action action)
return ret;
msg.header.clk_id = v->id;
- msg.header.cmd = CLK_MSG_RESET;
+ msg.header.cmd = CLK_MSG_RESET | v->flag;
msg.header.len = sizeof(struct clk_msg_header);
msg.reset = action;
@@ -236,7 +238,8 @@ static int virtclk_front_reset(struct clk *clk, enum clk_reset_action action)
goto err_out;
}
- ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+ ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX,
+ HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
if (ret) {
pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
ret);
@@ -276,7 +279,7 @@ static int virtclk_front_set_rate(struct clk *clk, unsigned long rate)
return ret;
msg.header.clk_id = v->id;
- msg.header.cmd = CLK_MSG_SETFREQ;
+ msg.header.cmd = CLK_MSG_SETFREQ | v->flag;
msg.header.len = sizeof(msg);
msg.freq = (u32)rate;
@@ -290,7 +293,8 @@ static int virtclk_front_set_rate(struct clk *clk, unsigned long rate)
goto err_out;
}
- ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+ ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX,
+ HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
if (ret) {
pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
ret);
@@ -348,7 +352,7 @@ static unsigned long virtclk_front_get_rate(struct clk *clk)
return 0;
msg.clk_id = v->id;
- msg.cmd = CLK_MSG_GETFREQ;
+ msg.cmd = CLK_MSG_GETFREQ | v->flag;
msg.len = sizeof(msg);
rt_mutex_lock(&virtclk_front_ctx.lock);
@@ -362,7 +366,8 @@ static unsigned long virtclk_front_get_rate(struct clk *clk)
goto err_out;
}
- ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0);
+ ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX,
+ HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
if (ret) {
ret = 0;
pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name,
diff --git a/drivers/clk/msm/virtclk-front.h b/drivers/clk/msm/virtclk-front.h
index 60650f8d1ed1..4d3bdf7cd841 100644
--- a/drivers/clk/msm/virtclk-front.h
+++ b/drivers/clk/msm/virtclk-front.h
@@ -23,6 +23,8 @@ enum virtclk_cmd {
CLK_MSG_MAX
};
+#define CLOCK_FLAG_NODE_TYPE_REMOTE 0xff00
+
struct clk_msg_header {
u32 cmd;
u32 len;
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 8bccf4ecdab6..9ff4ea63932d 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
}
static const u32 armada_38x_cpu_frequencies[] __initconst = {
- 0, 0, 0, 0,
- 1066 * 1000 * 1000, 0, 0, 0,
+ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
+ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
1332 * 1000 * 1000, 0, 0, 0,
- 1600 * 1000 * 1000,
+ 1600 * 1000 * 1000, 0, 0, 0,
+ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
};
static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
};
static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {1, 2},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {7, 15},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 510a9803bd82..68f4afde0d0c 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2691,7 +2691,7 @@ static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf,
else if (c->trace_method == XOR_PACKET)
len = snprintf(debug_buf, sizeof(debug_buf), "xor\n");
- rc = simple_read_from_buffer((void __user *) buf, len, ppos,
+ rc = simple_read_from_buffer((void __user *) buf, count, ppos,
(void *) debug_buf, len);
mutex_unlock(&debug_buf_mutex);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index ff0c8327fabe..3c3cf8e04eea 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -210,9 +210,11 @@ static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const struct freq_tbl *f_curr;
u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
- if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
+ if (rcg->enable_safe_config && (!clk_hw_is_prepared(hw)
+ || !clk_hw_is_enabled(hw))) {
if (!rcg->current_freq)
rcg->current_freq = cxo_f.freq;
return rcg->current_freq;
@@ -232,9 +234,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
mode >>= CFG_MODE_SHIFT;
}
- mask = BIT(rcg->hid_width) - 1;
- hid_div = cfg >> CFG_SRC_DIV_SHIFT;
- hid_div &= mask;
+ if (rcg->enable_safe_config) {
+ f_curr = qcom_find_freq(rcg->freq_tbl, rcg->current_freq);
+ if (!f_curr)
+ return -EINVAL;
+
+ hid_div = f_curr->pre_div;
+ } else {
+ mask = BIT(rcg->hid_width) - 1;
+ hid_div = cfg >> CFG_SRC_DIV_SHIFT;
+ hid_div &= mask;
+ }
return calc_rate(parent_rate, m, n, mode, hid_div);
}
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 2e7f03d50f4e..95a4dd290f35 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -1437,6 +1437,7 @@ static const struct freq_tbl ftbl_codec_clk[] = {
static struct clk_rcg2 codec_digcodec_clk_src = {
.cmd_rcgr = 0x1c09c,
+ .mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll1_emclk_sleep_map,
.freq_tbl = ftbl_codec_clk,
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
index 8b2e6fd601c0..ff837aad0c9a 100644
--- a/drivers/clk/qcom/gpucc-sdm660.c
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -181,6 +181,7 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
F_GFX(370000000, 0, 2, 0, 0, 740000000),
F_GFX(430000000, 0, 2, 0, 0, 860000000),
F_GFX(465000000, 0, 2, 0, 0, 930000000),
+ F_GFX(585000000, 0, 2, 0, 0, 1170000000),
F_GFX(588000000, 0, 2, 0, 0, 1176000000),
F_GFX(647000000, 0, 2, 0, 0, 1294000000),
F_GFX(700000000, 0, 2, 0, 0, 1400000000),
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
index a4044955c68f..d388073f5f42 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -430,8 +430,8 @@ static void mdss_dsi_pll_14nm_input_init(struct mdss_pll_resources *pll,
pdb->in.pll_ip_trim = 4; /* 4, reg: 0x0404 */
pdb->in.pll_cpcset_cur = 1; /* 1, reg: 0x04f0, bit 0 - 2 */
pdb->in.pll_cpmset_cur = 1; /* 1, reg: 0x04f0, bit 3 - 5 */
- pdb->in.pll_icpmset = 4; /* 4, reg: 0x04fc, bit 3 - 5 */
- pdb->in.pll_icpcset = 4; /* 4, reg: 0x04fc, bit 0 - 2 */
+ pdb->in.pll_icpmset = 7; /* 7, reg: 0x04fc, bit 3 - 5 */
+ pdb->in.pll_icpcset = 7; /* 7, reg: 0x04fc, bit 0 - 2 */
pdb->in.pll_icpmset_p = 0; /* 0, reg: 0x04f4, bit 0 - 2 */
pdb->in.pll_icpmset_m = 0; /* 0, reg: 0x04f4, bit 3 - 5 */
pdb->in.pll_icpcset_p = 0; /* 0, reg: 0x04f8, bit 0 - 2 */
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index 542737e4d204..05606f1b23dc 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2419,7 +2419,7 @@ static struct clk_regmap_div mmss_mdss_byte0_intf_div_clk = {
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .flags = CLK_GET_RATE_NOCACHE,
},
},
};
@@ -2476,7 +2476,7 @@ static struct clk_regmap_div mmss_mdss_byte1_intf_div_clk = {
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .flags = CLK_GET_RATE_NOCACHE,
},
},
};
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 8b2061fca5f0..7ca79714649e 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -46,6 +46,15 @@ config CPU_FREQ_STAT_DETAILS
If in doubt, say N.
+config CPU_FREQ_TIMES
+ bool "CPU frequency time-in-state statistics"
+ default y
+ help
+	  This driver exports CPU time-in-state information through the procfs
+	  file system.
+
+ If in doubt, say N.
+
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 6d4a7aeb506d..4f9ba8eb0130 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -4,7 +4,10 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o cpufreq_governor_attr_set.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
-# CPUfreq governors
+# CPUfreq times
+obj-$(CONFIG_CPU_FREQ_TIMES) += cpufreq_times.o
+
+# CPUfreq governors
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5f0f983ce173..659d2029ac5a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -447,6 +448,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
pr_debug("FREQ: %lu - CPU: %lu\n",
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
+ cpufreq_times_record_transition(freqs);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -1354,6 +1356,7 @@ static int cpufreq_online(unsigned int cpu)
goto out_exit_policy;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
+ cpufreq_times_create_policy(policy);
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
new file mode 100644
index 000000000000..e5df7a47cc16
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -0,0 +1,461 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
+#include <linux/cputime.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+#define UID_HASH_BITS 10
+
+DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
+
+static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
+
+struct uid_entry {
+ uid_t uid;
+ unsigned int max_state;
+ struct hlist_node hash;
+ struct rcu_head rcu;
+ u64 time_in_state[0];
+};
+
+/**
+ * struct cpu_freqs - per-cpu frequency information
+ * @offset: start of these freqs' stats in task time_in_state array
+ * @max_state: number of entries in freq_table
+ * @last_index: index in freq_table of last frequency switched to
+ * @freq_table: list of available frequencies
+ */
+struct cpu_freqs {
+ unsigned int offset;
+ unsigned int max_state;
+ unsigned int last_index;
+ unsigned int freq_table[0];
+};
+
+static struct cpu_freqs *all_freqs[NR_CPUS];
+
+static unsigned int next_offset;
+
+
+/* Caller must hold rcu_read_lock() */
+static struct uid_entry *find_uid_entry_rcu(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+
+ hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+}
+
+/* Caller must hold uid lock */
+static struct uid_entry *find_uid_entry_locked(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+
+ hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+}
+
+/* Caller must hold uid lock */
+static struct uid_entry *find_or_register_uid_locked(uid_t uid)
+{
+ struct uid_entry *uid_entry, *temp;
+ unsigned int max_state = READ_ONCE(next_offset);
+ size_t alloc_size = sizeof(*uid_entry) + max_state *
+ sizeof(uid_entry->time_in_state[0]);
+
+ uid_entry = find_uid_entry_locked(uid);
+ if (uid_entry) {
+ if (uid_entry->max_state == max_state)
+ return uid_entry;
+ /* uid_entry->time_in_state is too small to track all freqs, so
+ * expand it.
+ */
+ temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC);
+ if (!temp)
+ return uid_entry;
+ temp->max_state = max_state;
+ memset(temp->time_in_state + uid_entry->max_state, 0,
+ (max_state - uid_entry->max_state) *
+ sizeof(uid_entry->time_in_state[0]));
+ if (temp != uid_entry) {
+ hlist_replace_rcu(&uid_entry->hash, &temp->hash);
+ kfree_rcu(uid_entry, rcu);
+ }
+ return temp;
+ }
+
+ uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
+ if (!uid_entry)
+ return NULL;
+
+ uid_entry->uid = uid;
+ uid_entry->max_state = max_state;
+
+ hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
+
+ return uid_entry;
+}
+
+static bool freq_index_invalid(unsigned int index)
+{
+ unsigned int cpu;
+ struct cpu_freqs *freqs;
+
+ for_each_possible_cpu(cpu) {
+ freqs = all_freqs[cpu];
+ if (!freqs || index < freqs->offset ||
+ freqs->offset + freqs->max_state <= index)
+ continue;
+ return freqs->freq_table[index - freqs->offset] ==
+ CPUFREQ_ENTRY_INVALID;
+ }
+ return true;
+}
+
+static int single_uid_time_in_state_show(struct seq_file *m, void *ptr)
+{
+ struct uid_entry *uid_entry;
+ unsigned int i;
+ u64 time;
+ uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private);
+
+ if (uid == overflowuid)
+ return -EINVAL;
+
+ rcu_read_lock();
+
+ uid_entry = find_uid_entry_rcu(uid);
+ if (!uid_entry) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ for (i = 0; i < uid_entry->max_state; ++i) {
+ if (freq_index_invalid(i))
+ continue;
+ time = cputime_to_clock_t(uid_entry->time_in_state[i]);
+ seq_write(m, &time, sizeof(time));
+ }
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ if (*pos >= HASH_SIZE(uid_hash_table))
+ return NULL;
+
+ return &uid_hash_table[*pos];
+}
+
+static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ (*pos)++;
+
+ if (*pos >= HASH_SIZE(uid_hash_table))
+ return NULL;
+
+ return &uid_hash_table[*pos];
+}
+
+static void uid_seq_stop(struct seq_file *seq, void *v) { }
+
+static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
+{
+ struct uid_entry *uid_entry;
+ struct cpu_freqs *freqs, *last_freqs = NULL;
+ int i, cpu;
+
+ if (v == uid_hash_table) {
+ seq_puts(m, "uid:");
+ for_each_possible_cpu(cpu) {
+ freqs = all_freqs[cpu];
+ if (!freqs || freqs == last_freqs)
+ continue;
+ last_freqs = freqs;
+ for (i = 0; i < freqs->max_state; i++) {
+ if (freqs->freq_table[i] ==
+ CPUFREQ_ENTRY_INVALID)
+ continue;
+ seq_printf(m, " %d", freqs->freq_table[i]);
+ }
+ }
+ seq_putc(m, '\n');
+ }
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+ if (uid_entry->max_state)
+ seq_printf(m, "%d:", uid_entry->uid);
+ for (i = 0; i < uid_entry->max_state; ++i) {
+ if (freq_index_invalid(i))
+ continue;
+ seq_printf(m, " %lu", (unsigned long)cputime_to_clock_t(
+ uid_entry->time_in_state[i]));
+ }
+ if (uid_entry->max_state)
+ seq_putc(m, '\n');
+ }
+
+ rcu_read_unlock();
+ return 0;
+}
+
+void cpufreq_task_times_init(struct task_struct *p)
+{
+ void *temp;
+ unsigned long flags;
+ unsigned int max_state;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ p->time_in_state = NULL;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ p->max_state = 0;
+
+ max_state = READ_ONCE(next_offset);
+
+ /* We use one array to avoid multiple allocs per task */
+ temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
+ if (!temp)
+ return;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ p->time_in_state = temp;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ p->max_state = max_state;
+}
+
+/* Caller must hold task_time_in_state_lock */
+static int cpufreq_task_times_realloc_locked(struct task_struct *p)
+{
+ void *temp;
+ unsigned int max_state = READ_ONCE(next_offset);
+
+ temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
+ if (!temp)
+ return -ENOMEM;
+ p->time_in_state = temp;
+ memset(p->time_in_state + p->max_state, 0,
+ (max_state - p->max_state) * sizeof(u64));
+ p->max_state = max_state;
+ return 0;
+}
+
+void cpufreq_task_times_exit(struct task_struct *p)
+{
+ unsigned long flags;
+ void *temp;
+
+ if (!p->time_in_state)
+ return;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ temp = p->time_in_state;
+ p->time_in_state = NULL;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ kfree(temp);
+}
+
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *p)
+{
+ unsigned int cpu, i;
+ cputime_t cputime;
+ unsigned long flags;
+ struct cpu_freqs *freqs;
+ struct cpu_freqs *last_freqs = NULL;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ for_each_possible_cpu(cpu) {
+ freqs = all_freqs[cpu];
+ if (!freqs || freqs == last_freqs)
+ continue;
+ last_freqs = freqs;
+
+ seq_printf(m, "cpu%u\n", cpu);
+ for (i = 0; i < freqs->max_state; i++) {
+ if (freqs->freq_table[i] == CPUFREQ_ENTRY_INVALID)
+ continue;
+ cputime = 0;
+ if (freqs->offset + i < p->max_state &&
+ p->time_in_state)
+ cputime = p->time_in_state[freqs->offset + i];
+ seq_printf(m, "%u %lu\n", freqs->freq_table[i],
+ (unsigned long)cputime_to_clock_t(cputime));
+ }
+ }
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ return 0;
+}
+
+void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
+{
+ unsigned long flags;
+ unsigned int state;
+ struct uid_entry *uid_entry;
+ struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+ uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
+
+ if (!freqs || p->flags & PF_EXITING)
+ return;
+
+ state = freqs->offset + READ_ONCE(freqs->last_index);
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
+ p->time_in_state)
+ p->time_in_state[state] += cputime;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+
+ spin_lock_irqsave(&uid_lock, flags);
+ uid_entry = find_or_register_uid_locked(uid);
+ if (uid_entry && state < uid_entry->max_state)
+ uid_entry->time_in_state[state] += cputime;
+ spin_unlock_irqrestore(&uid_lock, flags);
+}
+
+void cpufreq_times_create_policy(struct cpufreq_policy *policy)
+{
+ int cpu, index;
+ unsigned int count = 0;
+ struct cpufreq_frequency_table *pos, *table;
+ struct cpu_freqs *freqs;
+ void *tmp;
+
+ if (all_freqs[policy->cpu])
+ return;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (!table)
+ return;
+
+ cpufreq_for_each_entry(pos, table)
+ count++;
+
+ tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
+ GFP_KERNEL);
+ if (!tmp)
+ return;
+
+ freqs = tmp;
+ freqs->max_state = count;
+
+ index = cpufreq_frequency_table_get_index(policy, policy->cur);
+ if (index >= 0)
+ WRITE_ONCE(freqs->last_index, index);
+
+ cpufreq_for_each_entry(pos, table)
+ freqs->freq_table[pos - table] = pos->frequency;
+
+ freqs->offset = next_offset;
+ WRITE_ONCE(next_offset, freqs->offset + count);
+ for_each_cpu(cpu, policy->related_cpus)
+ all_freqs[cpu] = freqs;
+}
+
+void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
+{
+ struct uid_entry *uid_entry;
+ struct hlist_node *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uid_lock, flags);
+
+ for (; uid_start <= uid_end; uid_start++) {
+ hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
+ hash, uid_start) {
+ if (uid_start == uid_entry->uid) {
+ hash_del_rcu(&uid_entry->hash);
+ kfree_rcu(uid_entry, rcu);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&uid_lock, flags);
+}
+
+void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
+{
+ int index;
+ struct cpu_freqs *freqs = all_freqs[freq->cpu];
+ struct cpufreq_policy *policy;
+
+ if (!freqs)
+ return;
+
+ policy = cpufreq_cpu_get(freq->cpu);
+ if (!policy)
+ return;
+
+ index = cpufreq_frequency_table_get_index(policy, freq->new);
+ if (index >= 0)
+ WRITE_ONCE(freqs->last_index, index);
+
+ cpufreq_cpu_put(policy);
+}
+
+static const struct seq_operations uid_time_in_state_seq_ops = {
+ .start = uid_seq_start,
+ .next = uid_seq_next,
+ .stop = uid_seq_stop,
+ .show = uid_time_in_state_seq_show,
+};
+
+static int uid_time_in_state_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &uid_time_in_state_seq_ops);
+}
+
+int single_uid_time_in_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, single_uid_time_in_state_show,
+ &(inode->i_uid));
+}
+
+static const struct file_operations uid_time_in_state_fops = {
+ .open = uid_time_in_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init cpufreq_times_init(void)
+{
+ proc_create_data("uid_time_in_state", 0444, NULL,
+ &uid_time_in_state_fops, NULL);
+
+ return 0;
+}
+
+early_initcall(cpufreq_times_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7ff8b15a3422..88728d997088 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1361,6 +1361,11 @@ static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
+static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+ {}
+};
+
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
@@ -1370,17 +1375,16 @@ static int __init intel_pstate_init(void)
if (no_load)
return -ENODEV;
+ if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
+ copy_cpu_funcs(&core_params.funcs);
+ hwp_active++;
+ goto hwp_cpu_matched;
+ }
+
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists())
- return -ENODEV;
-
cpu_def = (struct cpu_defaults *)id->driver_data;
copy_pid_params(&cpu_def->pid_policy);
@@ -1389,17 +1393,20 @@ static int __init intel_pstate_init(void)
if (intel_pstate_msrs_not_valid())
return -ENODEV;
+hwp_cpu_matched:
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists())
+ return -ENODEV;
+
pr_info("Intel P-state driver initializing.\n");
all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;
- if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
- pr_info("intel_pstate: HWP enabled\n");
- hwp_active++;
- }
-
if (!hwp_active && hwp_only)
goto out;
@@ -1410,6 +1417,9 @@ static int __init intel_pstate_init(void)
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
+ if (hwp_active)
+ pr_info("intel_pstate: HWP enabled\n");
+
return rc;
out:
get_online_cpus();
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index c4b0ef65988c..57e6c45724e7 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -592,7 +592,7 @@ static int __init powernv_cpufreq_init(void)
int rc = 0;
/* Don't probe on pseries (guest) platforms */
- if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
return -ENODEV;
/* Discover pstates from device tree and init */
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 68ef8fd9482f..f5c4e009113c 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -364,7 +364,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = clk_arm;
- return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+ policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+ if (ftab)
+ return cpufreq_table_validate_and_show(policy, ftab);
+
+ return 0;
}
static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 86628e22b2a3..719c3d9f07fb 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -30,54 +30,63 @@
static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+struct cpufreq_target {
+ struct cpufreq_policy *policy;
+ unsigned int freq;
+};
+
static unsigned int sh_cpufreq_get(unsigned int cpu)
{
return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
}
-/*
- * Here we notify other drivers of the proposed change and the final change.
- */
-static int sh_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static long __sh_cpufreq_target(void *arg)
{
- unsigned int cpu = policy->cpu;
+ struct cpufreq_target *target = arg;
+ struct cpufreq_policy *policy = target->policy;
+ int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
- cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
struct device *dev;
long freq;
- cpus_allowed = current->cpus_allowed;
- set_cpus_allowed_ptr(current, cpumask_of(cpu));
-
- BUG_ON(smp_processor_id() != cpu);
+ if (smp_processor_id() != cpu)
+ return -ENODEV;
dev = get_cpu_device(cpu);
/* Convert target_freq from kHz to Hz */
- freq = clk_round_rate(cpuclk, target_freq * 1000);
+ freq = clk_round_rate(cpuclk, target->freq * 1000);
if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
return -EINVAL;
- dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
+ dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
freqs.old = sh_cpufreq_get(cpu);
freqs.new = (freq + 500) / 1000;
freqs.flags = 0;
- cpufreq_freq_transition_begin(policy, &freqs);
- set_cpus_allowed_ptr(current, &cpus_allowed);
+ cpufreq_freq_transition_begin(target->policy, &freqs);
clk_set_rate(cpuclk, freq);
- cpufreq_freq_transition_end(policy, &freqs, 0);
+ cpufreq_freq_transition_end(target->policy, &freqs, 0);
dev_dbg(dev, "set frequency %lu Hz\n", freq);
-
return 0;
}
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int sh_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_target data = { .policy = policy, .freq = target_freq };
+
+ return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
+}
+
static int sh_cpufreq_verify(struct cpufreq_policy *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 344058f8501a..d5657d50ac40 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
#define CPUIDLE_COUPLED_NOT_IDLE (-1)
-static DEFINE_MUTEX(cpuidle_coupled_lock);
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
/*
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index d5c5a476360f..c44a843cb405 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -282,7 +282,7 @@ static int powernv_idle_probe(void)
if (cpuidle_disable != IDLE_NO_OVERRIDE)
return -ENODEV;
- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+ if (firmware_has_feature(FW_FEATURE_OPAL)) {
cpuidle_state_table = powernv_states;
/* Device tree can indicate more idle states */
max_idle_state = powernv_add_idle_states();
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b67f37..ea11a33e7fff 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
if (!state_node)
break;
- if (!of_device_is_available(state_node))
+ if (!of_device_is_available(state_node)) {
+ of_node_put(state_node);
continue;
+ }
if (!idle_state_valid(state_node, i, cpumask)) {
pr_warn("%s idle state not valid, bailing out\n",
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index 81a9f9763915..b4675df551b3 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -451,10 +451,6 @@ static int parse_legacy_cluster_params(struct device_node *node,
return 0;
failed:
pr_err("%s(): Failed reading %s\n", __func__, key);
- kfree(c->name);
- kfree(c->lpm_dev);
- c->name = NULL;
- c->lpm_dev = NULL;
return ret;
}
@@ -640,8 +636,6 @@ static int parse_cluster_level(struct device_node *node,
return 0;
failed:
pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
- kfree(level->mode);
- level->mode = NULL;
return ret;
}
@@ -836,19 +830,12 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
return 0;
failed:
- for (i = 0; i < c->cpu->nlevels; i++) {
- kfree(c->cpu->levels[i].name);
- c->cpu->levels[i].name = NULL;
- }
- kfree(c->cpu);
- c->cpu = NULL;
pr_err("%s(): Failed with error code:%d\n", __func__, ret);
return ret;
}
void free_cluster_node(struct lpm_cluster *cluster)
{
- int i;
struct lpm_cluster *cl, *m;
list_for_each_entry_safe(cl, m, &cluster->child, list) {
@@ -856,22 +843,6 @@ void free_cluster_node(struct lpm_cluster *cluster)
free_cluster_node(cl);
};
- if (cluster->cpu) {
- for (i = 0; i < cluster->cpu->nlevels; i++) {
- kfree(cluster->cpu->levels[i].name);
- cluster->cpu->levels[i].name = NULL;
- }
- }
- for (i = 0; i < cluster->nlevels; i++) {
- kfree(cluster->levels[i].mode);
- cluster->levels[i].mode = NULL;
- }
- kfree(cluster->cpu);
- kfree(cluster->name);
- kfree(cluster->lpm_dev);
- cluster->cpu = NULL;
- cluster->name = NULL;
- cluster->lpm_dev = NULL;
cluster->ndevices = 0;
}
@@ -989,9 +960,7 @@ failed_parse_cluster:
list_del(&c->list);
free_cluster_node(c);
failed_parse_params:
- c->parent = NULL;
pr_err("Failed parse params\n");
- kfree(c);
return NULL;
}
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 584a1857624a..324cce5d7354 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1706,7 +1706,8 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl)
struct cpuidle_state *st = &cl->drv->states[i];
struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
- snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
+ snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
+ cpu_level->name);
st->flags = 0;
st->exit_latency = cpu_level->pwr.latency_us;
st->power_usage = cpu_level->pwr.ss_power;
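For context on the one-line lpm-levels fix above: passing a device-tree-supplied level name straight to snprintf() as the format string lets stray %-specifiers walk the stack. A minimal standalone sketch of the difference (plain userspace C; the example string is invented):

#include <stdio.h>

int main(void)
{
	char desc[32];
	/* Imagine this string came from devicetree rather than from source. */
	const char *name = "l2-gdhs-%s";

	/*
	 * Unsafe: passing the string as the format means any stray
	 * %-specifier reads (or, with %n, writes) unrelated stack data:
	 *
	 *     snprintf(desc, sizeof(desc), name);
	 *
	 * Safe: the string is copied purely as data.
	 */
	snprintf(desc, sizeof(desc), "%s", name);
	printf("%s\n", desc);
	return 0;
}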
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 53e61459c69f..ee87eb77095c 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -224,12 +224,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
+ if (ret)
+ break;
+
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
- !(rdsta_val & (1 << sh_idx)))
+ !(rdsta_val & (1 << sh_idx))) {
ret = -EAGAIN;
- if (ret)
break;
+ }
+
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index a55f236961b8..953159c66159 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1,6 +1,6 @@
/* Qualcomm CE device driver.
*
- * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -843,6 +843,7 @@ static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
kzfree(k_buf_src);
+ qcedev_areq->sha_req.sreq.src = NULL;
return err;
}
@@ -1012,6 +1013,7 @@ static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
handle->sha_ctxt.first_blk = 0;
kzfree(k_src);
+ qcedev_areq->sha_req.sreq.src = NULL;
return err;
}
@@ -1166,8 +1168,10 @@ static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
if (err == 0 && copy_to_user(
(void __user *)creq->vbuf.dst[dst_i].vaddr,
(k_align_dst + byteoffset),
- creq->vbuf.dst[dst_i].len))
- return -EFAULT;
+ creq->vbuf.dst[dst_i].len)) {
+ err = -EFAULT;
+ goto exit;
+ }
k_align_dst += creq->vbuf.dst[dst_i].len +
byteoffset;
@@ -1176,9 +1180,11 @@ static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
} else {
if (err == 0 && copy_to_user(
(void __user *)creq->vbuf.dst[dst_i].vaddr,
- (k_align_dst + byteoffset),
- creq->data_len))
- return -EFAULT;
+ (k_align_dst + byteoffset),
+ creq->data_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
k_align_dst += creq->data_len;
creq->vbuf.dst[dst_i].len -= creq->data_len;
@@ -1187,7 +1193,9 @@ static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
}
}
*di = dst_i;
-
+exit:
+ areq->cipher_req.creq.src = NULL;
+ areq->cipher_req.creq.dst = NULL;
return err;
};
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index f38fc422b35e..2bfd0525c529 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -1,6 +1,6 @@
/* Qualcomm Crypto driver
*
- * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3923,7 +3923,7 @@ static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
uint8_t *in_buf;
int ret = 0;
- struct scatterlist sg;
+ struct scatterlist sg = {0};
struct ahash_request *ahash_req;
struct completion ahash_req_complete;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index fd39893079d5..45ea8957a73a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -401,16 +401,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
uint32_t aes_control;
int err;
unsigned long flags;
+ u8 *iv;
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;
- if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
+ if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
- else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
+ iv = req->info;
+ } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
-
+ iv = req->info;
+ } else {
+ iv = NULL; /* AES_ECB */
+ }
if (dev->ctx->keylen == AES_KEYSIZE_192)
aes_control |= SSS_AES_KEY_SIZE_192;
else if (dev->ctx->keylen == AES_KEYSIZE_256)
@@ -440,7 +445,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
s5p_set_dma_indata(dev, req->src);
s5p_set_dma_outdata(dev, req->dst);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 844a8ad666a9..64e86ad63d0a 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -607,7 +607,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
devfreq = devfreq_add_device(dev, profile, governor_name, data);
if (IS_ERR(devfreq)) {
devres_free(ptr);
- return ERR_PTR(-ENOMEM);
+ return devfreq;
}
*ptr = devfreq;
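The devfreq hunk above returns the original ERR_PTR() instead of collapsing every failure to -ENOMEM. Below is a small standalone sketch of that error-pointer convention; the helpers are re-implemented locally for illustration and the add_device()/devm_add_device() stubs are invented, not the devfreq API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified re-implementation of the kernel's error-pointer helpers:
 * small negative errno values are packed into the top of the pointer
 * range so one return value can carry either a pointer or an error.
 */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)(uintptr_t)error; }
static long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static int device_present;	/* toggle to see the success path */

static void *add_device(void)
{
	static int dummy;

	return device_present ? (void *)&dummy : ERR_PTR(-ENODEV);
}

static void *devm_add_device(void)
{
	void **slot = malloc(sizeof(*slot));	/* stands in for devres_alloc() */
	void *dev;

	if (!slot)
		return ERR_PTR(-ENOMEM);

	dev = add_device();
	if (IS_ERR(dev)) {
		free(slot);
		/* Hand back the original error pointer so the caller sees
		 * the real reason (-ENODEV here), not a blanket -ENOMEM.
		 */
		return dev;
	}
	*slot = dev;	/* devres would keep ownership of this slot */
	return dev;
}

int main(void)
{
	void *dev = devm_add_device();

	if (IS_ERR(dev))
		printf("add failed with errno %ld\n", -PTR_ERR(dev));
	return 0;
}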
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 53d22eb73b56..be26f625bb3e 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -716,7 +716,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct data_chunk *first = xt->sgl;
+ struct data_chunk *first;
struct at_desc *desc = NULL;
size_t xfer_count;
unsigned int dwidth;
@@ -728,6 +728,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
return NULL;
+ first = xt->sgl;
+
dev_info(chan2dev(chan),
"%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
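The at_hdmac hunk above defers reading xt->sgl until after xt has been checked. A tiny standalone sketch of the same check-before-use ordering, with invented struct and field names rather than the dmaengine types:

#include <stdio.h>
#include <stddef.h>

struct xfer_template {
	int numf;
	int frame_size;
	int *sgl;
};

static int prep_interleaved(struct xfer_template *xt)
{
	int *first;

	/*
	 * Validate the pointer before reading any member; initializing
	 * 'first' in its declaration would dereference 'xt' even when
	 * the caller passed NULL.
	 */
	if (!xt || xt->numf != 1 || !xt->frame_size)
		return -1;

	first = xt->sgl;
	printf("first chunk = %d\n", *first);
	return 0;
}

int main(void)
{
	int chunks[] = { 42 };
	struct xfer_template xt = { .numf = 1, .frame_size = 1, .sgl = chunks };

	prep_interleaved(NULL);	/* rejected without touching the pointer */
	prep_interleaved(&xt);
	return 0;
}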
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 66c073fc8afc..82a7c89caae2 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
- initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
- rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+ rmb();
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
rmb();
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 7638b24ce8d0..35fc58f4bf4b 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -557,7 +557,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret)
- return ret;
+ goto err_clk;
irq = platform_get_irq(pdev, 0);
ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -570,6 +570,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_clk:
+ clk_disable_unprepare(dmadev->clk);
return ret;
}
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 7254c20007f8..6796eb1a8a4c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -329,7 +329,7 @@ static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
struct dmatest_thread *thread =
- container_of(arg, struct dmatest_thread, done_wait);
+ container_of(done, struct dmatest_thread, test_done);
if (!thread->done) {
done->done = true;
wake_up_all(done->wait);
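The dmatest fix above corrects which member is named in container_of(): the callback argument points into test_done, so computing the offset of done_wait returned a skewed thread pointer. A standalone userspace sketch of the macro, with the struct layout simplified for illustration:

#include <stddef.h>
#include <stdio.h>

/*
 * Userspace equivalent of the kernel's container_of(): recover the
 * enclosing structure from a pointer to one of its members.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dmatest_done { int done; };

struct dmatest_thread {
	int id;
	struct dmatest_done done_wait;
	struct dmatest_done test_done;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	/*
	 * The member named here must be the one 'done' actually points
	 * into; naming done_wait while being handed &thread->test_done
	 * yields a skewed thread pointer, which is the bug fixed above.
	 */
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);

	printf("callback for thread %d\n", thread->id);
}

int main(void)
{
	struct dmatest_thread t = { .id = 7 };

	dmatest_callback(&t.test_done);
	return 0;
}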
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 0f6fd42f55ca..48d4dddf4941 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -911,6 +911,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
+static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+{
+ sdma_disable_channel(chan);
+
+ /*
+ * According to the NXP R&D team, a delay of one BD transfer time
+ * (at most 1 ms) should be added after clearing the channel enable
+ * bit, to ensure the SDMA core has really stopped after SDMA
+ * clients call .device_terminate_all.
+ */
+ mdelay(1);
+
+ return 0;
+}
+
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
@@ -1707,17 +1722,24 @@ static int sdma_probe(struct platform_device *pdev)
if (IS_ERR(sdma->clk_ahb))
return PTR_ERR(sdma->clk_ahb);
- clk_prepare(sdma->clk_ipg);
- clk_prepare(sdma->clk_ahb);
+ ret = clk_prepare(sdma->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare(sdma->clk_ahb);
+ if (ret)
+ goto err_clk;
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
sdma);
if (ret)
- return ret;
+ goto err_irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
- return -ENOMEM;
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
+ goto err_irq;
+ }
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
@@ -1793,7 +1815,7 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel;
+ sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
@@ -1832,6 +1854,10 @@ err_register:
dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
+err_irq:
+ clk_unprepare(sdma->clk_ahb);
+err_clk:
+ clk_unprepare(sdma->clk_ipg);
return ret;
}
@@ -1842,6 +1868,8 @@ static int sdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
+ clk_unprepare(sdma->clk_ahb);
+ clk_unprepare(sdma->clk_ipg);
/* Kill the tasklet */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
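The imx-sdma hunks above add err_irq/err_clk labels so every failure path unprepares exactly the clocks set up before it. A minimal sketch of that goto-unwind pattern in plain C; the prepare/unprepare/request stubs are placeholders, not the clk or irq APIs:

#include <stdio.h>
#include <stdlib.h>

/*
 * Placeholder resource helpers; they only print so the unwind order
 * is visible. They are not the real clk/irq APIs.
 */
static int prepare_clk(const char *name) { printf("prepare %s\n", name); return 0; }
static void unprepare_clk(const char *name) { printf("unprepare %s\n", name); }
static int request_irq_stub(void) { return -1; /* force a failure */ }

static int probe(void)
{
	int ret;

	ret = prepare_clk("ipg");
	if (ret)
		return ret;

	ret = prepare_clk("ahb");
	if (ret)
		goto err_clk;

	ret = request_irq_stub();
	if (ret)
		goto err_irq;

	return 0;

	/*
	 * Labels unwind in reverse order of acquisition, so every failure
	 * point releases exactly what had been set up before it.
	 */
err_irq:
	unprepare_clk("ahb");
err_clk:
	unprepare_clk("ipg");
	return ret;
}

int main(void)
{
	return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}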
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index abb75ebd65ea..ac8c28968422 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -395,7 +395,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
dev_err(dev, "Self-test copy failed compare, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
unmap_dma:
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 8100ede095d5..c7bd1c5315f4 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -51,7 +51,15 @@ struct ti_am335x_xbar_map {
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val, iomem + event);
+ /*
+ * The TPCC_EVT_MUX_60_63 register layout differs from the rest:
+ * event 63 is mapped to the lowest byte and event 60 to the
+ * highest, so handle it separately.
+ */
+ if (event >= 60 && event <= 63)
+ writeb_relaxed(val, iomem + (63 - event % 4));
+ else
+ writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
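The ti-dma-crossbar hunk above special-cases events 60-63, whose mux bytes sit in reverse order inside TPCC_EVT_MUX_60_63. A quick standalone check of that offset arithmetic (no MMIO involved):

#include <stdio.h>

/*
 * Byte offset of the mux field for a given event number, mirroring the
 * hunk above: events 60..63 share one register whose byte order is
 * reversed relative to the other events.
 */
static int xbar_byte_offset(int event)
{
	if (event >= 60 && event <= 63)
		return 63 - event % 4;
	return event;
}

int main(void)
{
	for (int event = 58; event <= 63; event++)
		printf("event %2d -> byte offset %2d\n", event, xbar_byte_offset(event));
	return 0;
}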
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 6059d81e701a..8e55403847b2 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -26,7 +26,7 @@
#define DRIVER_NAME "zx-dma"
#define DMA_ALIGN 4
-#define DMA_MAX_SIZE (0x10000 - PAGE_SIZE)
+#define DMA_MAX_SIZE (0x10000 - 512)
#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
#define REG_ZX_SRC_ADDR 0x00
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 0574e1bbe45c..3ce5609b4611 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -763,7 +763,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index cda6dab5067a..6b65a102b49d 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -79,6 +79,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
+ int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index d071e89d3124..99fd598b5069 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -230,10 +230,15 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
}
msleep(100);
}
- if (status_down)
+ if (status_down) {
dev_dbg(dev, "shutdown successful\n");
- else
+ esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
+ } else {
dev_err(mdm->dev, "graceful poff ipc fail\n");
+ graceful_shutdown = false;
+ goto force_poff;
+ }
+ break;
force_poff:
case ESOC_FORCE_PWR_OFF:
if (!graceful_shutdown) {
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 4be66a16a3a1..0288082cea00 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -55,7 +55,7 @@ static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
if (!atomic)
usleep_range(reset_time_us, reset_time_us + 100000);
else
- mdelay(mdm->reset_time_ms);
+ mdelay(DEF_MDM9X55_RESET_TIME);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_de_assert);
return 0;
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index c50e930d97d3..297121acc57d 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -326,7 +326,7 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
}
}
-static int intel_gpio_runtime_idle(struct device *dev)
+static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
{
int err = pm_schedule_suspend(dev, 500);
return err ?: -EBUSY;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2a8122444614..9ba4aaa9f755 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -200,6 +200,48 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
+static void gpio_rcar_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+
+ pm_runtime_get_sync(&p->pdev->dev);
+}
+
+static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
+
+static int gpio_rcar_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+ int error;
+
+ error = pm_runtime_get_sync(&p->pdev->dev);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static void gpio_rcar_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -460,6 +502,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
+ irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
+ irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
+ irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
+ irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add(gpio_chip);
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 18a8182d4fec..7f1f32324504 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -42,9 +42,7 @@ struct xgene_gpio {
struct gpio_chip chip;
void __iomem *base;
spinlock_t lock;
-#ifdef CONFIG_PM
u32 set_dr_val[XGENE_MAX_GPIO_BANKS];
-#endif
};
static inline struct xgene_gpio *to_xgene_gpio(struct gpio_chip *chip)
@@ -132,8 +130,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
return 0;
}
-#ifdef CONFIG_PM
-static int xgene_gpio_suspend(struct device *dev)
+static __maybe_unused int xgene_gpio_suspend(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -146,7 +143,7 @@ static int xgene_gpio_suspend(struct device *dev)
return 0;
}
-static int xgene_gpio_resume(struct device *dev)
+static __maybe_unused int xgene_gpio_resume(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -160,10 +157,6 @@ static int xgene_gpio_resume(struct device *dev)
}
static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
-#define XGENE_GPIO_PM_OPS (&xgene_gpio_pm)
-#else
-#define XGENE_GPIO_PM_OPS NULL
-#endif
static int xgene_gpio_probe(struct platform_device *pdev)
{
@@ -230,7 +223,7 @@ static struct platform_driver xgene_gpio_driver = {
.driver = {
.name = "xgene-gpio",
.of_match_table = xgene_gpio_of_match,
- .pm = XGENE_GPIO_PM_OPS,
+ .pm = &xgene_gpio_pm,
},
.probe = xgene_gpio_probe,
.remove = xgene_gpio_remove,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 06d345b087f8..fe89fd56eabf 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2117,6 +2117,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
struct gpio_desc *desc = NULL;
int status;
enum gpio_lookup_flags lookupflags = 0;
+ /* Maybe we have a device name, maybe not */
+ const char *devname = dev ? dev_name(dev) : "?";
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -2145,7 +2147,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- status = gpiod_request(desc, con_id);
+ /*
+ * If a connection label was passed use that, else attempt to use
+ * the device name as label
+ */
+ status = gpiod_request(desc, con_id ? con_id : devname);
if (status < 0)
return ERR_PTR(status);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index a142d5ae148d..5c40d6d710af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -585,6 +585,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
size_t size;
u32 retry = 3;
+ if (amdgpu_acpi_pcie_notify_device_ready(adev))
+ return -EINVAL;
+
/* Get the device handle */
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 930083336968..1f0e6ede120c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+ amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -739,9 +732,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -760,8 +755,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
amdgpu_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -862,9 +861,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
@@ -918,8 +919,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -981,9 +984,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1108,8 +1113,10 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1351,9 +1358,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1421,8 +1430,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fc9f14747f70..a36230d1331c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1467,8 +1467,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* ignore it */
vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
- if (amdgpu_runtime_pm == 1)
- runtime = true;
if (amdgpu_device_is_px(ddev))
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 82903ca78529..c555781685ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -560,6 +560,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index b57fffc2d4af..0a91261b6f5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2104,34 +2104,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
case CHIP_KAVERI:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 4;
- if ((adev->pdev->device == 0x1304) ||
- (adev->pdev->device == 0x1305) ||
- (adev->pdev->device == 0x130C) ||
- (adev->pdev->device == 0x130F) ||
- (adev->pdev->device == 0x1310) ||
- (adev->pdev->device == 0x1311) ||
- (adev->pdev->device == 0x131C)) {
- adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1309) ||
- (adev->pdev->device == 0x130A) ||
- (adev->pdev->device == 0x130D) ||
- (adev->pdev->device == 0x1313) ||
- (adev->pdev->device == 0x131D)) {
- adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1306) ||
- (adev->pdev->device == 0x1307) ||
- (adev->pdev->device == 0x130B) ||
- (adev->pdev->device == 0x130E) ||
- (adev->pdev->device == 0x1315) ||
- (adev->pdev->device == 0x131B)) {
- adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
- } else {
- adev->gfx.config.max_cu_per_sh = 3;
- adev->gfx.config.max_backends_per_se = 1;
- }
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 74909e72a009..2acbd43f9a53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
return ret;
}
+static void kfd_topology_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
static struct kobj_type sysprops_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = {
};
static struct kobj_type iolink_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = {
};
static struct kobj_type mem_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@@ -625,6 +633,7 @@ static const struct sysfs_ops cache_ops = {
};
static struct kobj_type cache_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
@@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = {
};
static struct kobj_type node_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index cebcab560626..5d68189176cc 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1182,17 +1182,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
dcrtc);
- if (ret < 0) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret < 0)
+ goto err_crtc;
if (dcrtc->variant->init) {
ret = dcrtc->variant->init(dcrtc, dev);
- if (ret) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret)
+ goto err_crtc;
}
/* Ensure AXI pipeline is enabled */
@@ -1203,13 +1199,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->crtc.port = port;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
- return -ENOMEM;
+ if (!primary) {
+ ret = -ENOMEM;
+ goto err_crtc;
+ }
ret = armada_drm_plane_init(primary);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1219,7 +1217,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1238,6 +1236,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
err_crtc_init:
primary->base.funcs->destroy(&primary->base);
+err_crtc:
+ kfree(dcrtc);
+
return ret;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6bf4588de46c..fbd717324328 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -114,6 +114,9 @@ static struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3763,8 +3766,7 @@ monitor_name(struct detailed_timing *t, void *data)
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
- * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
- * fill in.
+ * HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
@@ -3840,6 +3842,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
eld[5] |= sad_count << 4;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP;
+ else
+ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI;
+
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8090989185b2..4ddbc49125cd 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1271,9 +1271,9 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
if (atomic_dec_and_test(&vblank->refcount)) {
if (drm_vblank_offdelay == 0)
return;
- else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
+ else if (drm_vblank_offdelay < 0)
vblank_disable_fn((unsigned long)vblank);
- else
+ else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
@@ -1902,6 +1902,16 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
wake_up(&vblank->queue);
drm_handle_vblank_events(dev, pipe);
+ /* With instant-off, we defer disabling the interrupt until after
+ * we finish processing the following vblank. The disable has to
+ * be last (after drm_handle_vblank_events) so that the timestamp
+ * is always accurate.
+ */
+ if (dev->vblank_disable_immediate &&
+ drm_vblank_offdelay > 0 &&
+ !atomic_read(&vblank->refcount))
+ vblank_disable_fn((unsigned long)vblank);
+
spin_unlock_irqrestore(&dev->event_lock, irqflags);
return true;
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 6675b1428410..c257de351cfa 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -69,7 +69,7 @@ void drm_modeset_lock_all(struct drm_device *dev)
struct drm_modeset_acquire_ctx *ctx;
int ret;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
if (WARN_ON(!ctx))
return;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index f8b5fcfa91a2..1fe4b8e6596b 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -412,6 +412,26 @@ out:
}
/**
+ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+ *
+ * Determine if %current task is an output poll worker. This can be used
+ * to select distinct code paths for output polling versus other contexts.
+ *
+ * One use case is to avoid a deadlock between the output poll worker and
+ * the autosuspend worker wherein the latter waits for polling to finish
+ * upon calling drm_kms_helper_poll_disable(), while the former waits for
+ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
+ * connector ->detect hook.
+ */
+bool drm_kms_helper_is_poll_worker(void)
+{
+ struct work_struct *work = current_work();
+
+ return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+
+/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
*
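The new drm_kms_helper_is_poll_worker() above decides by comparing the work item the current task is executing against output_poll_execute, which lets connector ->detect hooks skip the runtime-PM get that would deadlock against the autosuspend worker. A rough userspace analogue of that idea, using a thread-local slot in place of current_work(); all names below are invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef void (*work_fn_t)(void);

/* Each worker records the callback it is running, like current_work(). */
static _Thread_local work_fn_t current_work_fn;

static void output_poll_execute(void);

static bool is_poll_worker(void)
{
	/* Same idea as comparing work->func against output_poll_execute. */
	return current_work_fn == output_poll_execute;
}

static void detect(void)
{
	if (is_poll_worker())
		printf("detect(): poll-worker context, skip the runtime-PM get\n");
	else
		printf("detect(): normal context, take the runtime-PM reference\n");
}

static void output_poll_execute(void)
{
	detect();
}

static void *worker(void *arg)
{
	(void)arg;
	current_work_fn = output_poll_execute;	/* analogous to current_work() */
	output_poll_execute();
	current_work_fn = NULL;
	return NULL;
}

int main(void)
{
	pthread_t t;

	detect();	/* direct call: not the poll worker */
	if (pthread_create(&t, NULL, worker, NULL) == 0)
		pthread_join(t, NULL);
	return 0;
}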
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4813e03f5ee..00275c3856ce 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -821,14 +821,18 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = dsi_config->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
-
u32 pipeconf_reg = PIPEACONF;
u32 dspcntr_reg = DSPACNTR;
+ u32 pipeconf, dspcntr;
- u32 pipeconf = dev_priv->pipeconf[pipe];
- u32 dspcntr = dev_priv->dspcntr[pipe];
u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+ if (WARN_ON(pipe < 0))
+ return;
+
+ pipeconf = dev_priv->pipeconf[pipe];
+ dspcntr = dev_priv->dspcntr[pipe];
+
if (pipe) {
pipeconf_reg = PIPECCONF;
dspcntr_reg = DSPCCNTR;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 89f705c3a5eb..910a2f253990 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -382,16 +382,6 @@ static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (mode == connector->dpms)
- return;
-
- /*first, execute dpms*/
-
- drm_helper_connector_dpms(connector, mode);
-}
-
static struct drm_encoder *mdfld_dsi_connector_best_encoder(
struct drm_connector *connector)
{
@@ -404,7 +394,7 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
/*DSI connector funcs*/
static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
- .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.save = mdfld_dsi_connector_save,
.restore = mdfld_dsi_connector_restore,
.detect = mdfld_dsi_connector_detect,
diff --git a/drivers/gpu/drm/msm-hyp/msm_drv_hyp.c b/drivers/gpu/drm/msm-hyp/msm_drv_hyp.c
index baf7ee266088..133b1220f5fb 100644
--- a/drivers/gpu/drm/msm-hyp/msm_drv_hyp.c
+++ b/drivers/gpu/drm/msm-hyp/msm_drv_hyp.c
@@ -355,11 +355,35 @@ static int msm_ioctl_query_client_id(struct drm_device *dev, void *data,
return ret;
}
+static long msm_drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DRM_IOCTL_PRIME_FD_TO_HANDLE:
+ {
+ struct drm_prime_handle cmd_data;
+
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(struct drm_prime_handle)) != 0)
+ return -EFAULT;
+ cmd_data.handle = cmd_data.fd;
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(struct drm_prime_handle)) != 0)
+ return -EFAULT;
+ return 0;
+ }
+ case DRM_IOCTL_GEM_CLOSE:
+ return 0;
+ default:
+ return drm_ioctl(filp, cmd, arg);
+ }
+}
+
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = msm_drm_ioctl,
.poll = drm_poll,
.read = drm_read,
.write = msm_drm_write,
diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c
index 49999ba468e5..62294ddf8034 100644
--- a/drivers/gpu/drm/msm/dba_bridge.c
+++ b/drivers/gpu/drm/msm/dba_bridge.c
@@ -51,6 +51,7 @@ struct dba_bridge {
u32 num_of_input_lanes;
bool pluggable;
u32 panel_count;
+ bool cont_splash_enabled;
};
#define to_dba_bridge(x) container_of((x), struct dba_bridge, base)
@@ -123,10 +124,18 @@ error:
static void _dba_bridge_pre_enable(struct drm_bridge *bridge)
{
+ struct dba_bridge *d_bridge;
+
if (!bridge) {
SDE_ERROR("Invalid params\n");
return;
}
+
+ d_bridge = to_dba_bridge(bridge);
+
+ /* Skip the power_on call when splash is enabled in the bootloader. */
+ if ((d_bridge->ops.power_on) && (!d_bridge->cont_splash_enabled))
+ d_bridge->ops.power_on(d_bridge->dba_ctx, true, 0);
}
static void _dba_bridge_enable(struct drm_bridge *bridge)
@@ -186,7 +195,8 @@ static void _dba_bridge_enable(struct drm_bridge *bridge)
video_cfg.scaninfo, video_cfg.ar, video_cfg.vic);
}
- if (d_bridge->ops.video_on) {
+ /* Skip the video_on call if splash is enabled in the bootloader. */
+ if ((d_bridge->ops.video_on) && (!d_bridge->cont_splash_enabled)) {
rc = d_bridge->ops.video_on(d_bridge->dba_ctx, true,
&video_cfg, 0);
if (rc)
@@ -318,6 +328,7 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev,
bridge->panel_count = data->panel_count;
bridge->base.funcs = &_dba_bridge_ops;
bridge->base.encoder = encoder;
+ bridge->cont_splash_enabled = data->cont_splash_enabled;
rc = drm_bridge_attach(dev, &bridge->base);
if (rc) {
@@ -333,7 +344,10 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev,
encoder->bridge = &bridge->base;
}
- if (!bridge->pluggable) {
+ /* If the early splash in the bootloader has already enabled the
+ * bridge chip, the call below should be skipped.
+ */
+ if (!bridge->pluggable && !bridge->cont_splash_enabled) {
if (bridge->ops.power_on)
bridge->ops.power_on(bridge->dba_ctx, true, 0);
if (bridge->ops.check_hpd)
diff --git a/drivers/gpu/drm/msm/dba_bridge.h b/drivers/gpu/drm/msm/dba_bridge.h
index 5562d2b2aef9..edc130f92257 100644
--- a/drivers/gpu/drm/msm/dba_bridge.h
+++ b/drivers/gpu/drm/msm/dba_bridge.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,7 @@ struct dba_bridge_init {
struct drm_bridge *precede_bridge;
bool pluggable;
u32 panel_count;
+ bool cont_splash_enabled;
};
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index fb9617ce572a..b1cd666f8be4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1089,7 +1089,7 @@ error:
static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
- u32 lanes;
+ u32 lanes = 0;
u32 ulps_lanes;
if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
@@ -1598,7 +1598,7 @@ exit:
*
* Return: error code.
*/
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled)
{
int rc = 0;
@@ -1608,44 +1608,48 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
}
mutex_lock(&dsi_ctrl->ctrl_lock);
- rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
- if (rc) {
- pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
- dsi_ctrl->index, rc);
- goto error;
- }
+ if (!cont_splash_enabled) {
+ rc = dsi_ctrl_check_state(
+ dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
+ if (rc) {
+ pr_err("[DSI_%d] Ctrl state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
- dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
&dsi_ctrl->host_config.lane_map);
- dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config);
- if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
- dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
+ if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+ dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config,
&dsi_ctrl->host_config.u.cmd_engine);
- dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
dsi_ctrl->host_config.video_timing.h_active,
dsi_ctrl->host_config.video_timing.h_active * 3,
dsi_ctrl->host_config.video_timing.v_active,
0x0);
- } else {
- dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
+ } else {
+ dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config,
&dsi_ctrl->host_config.u.video_engine);
- dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
&dsi_ctrl->host_config.video_timing);
+ }
}
-
-
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
- /* Perform a soft reset before enabling dsi controller */
- dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+ /* Perform a soft reset before enabling the DSI controller,
+ * but skip the reset if DSI was already enabled in the bootloader.
+ */
+ if (!cont_splash_enabled)
+ dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
pr_debug("[DSI_%d]Host initialization complete\n", dsi_ctrl->index);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
error:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 993a35cbf84a..c343c41eb8e1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -331,6 +331,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
+ * @cont_splash_enabled: Flag indicating DSI splash was enabled in the bootloader.
*
* Initializes DSI controller hardware with host configuration provided by
* dsi_ctrl_update_host_config(). Initialization can be performed only during
@@ -339,7 +340,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
*
* Return: error code.
*/
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled);
/**
* dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
index 2f0f6c2f1b01..caba50832cca 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -565,7 +565,7 @@ u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
u32 read_cnt;
u32 rx_byte = 0;
u32 repeated_bytes = 0;
- u8 reg[16];
+ u8 reg[16] = {0};
u32 pkt_size = 0;
int buf_offset = read_offset;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index f2412daee8b6..1e5681a77a6e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -174,6 +174,11 @@ static int dsi_display_ctrl_power_on(struct dsi_display *display)
int i;
struct dsi_display_ctrl *ctrl;
+ if (display->cont_splash_enabled) {
+ pr_debug("skip ctrl power on\n");
+ return rc;
+ }
+
/* Sequence does not matter for split dsi usecases */
for (i = 0; i < display->ctrl_count; i++) {
@@ -231,6 +236,12 @@ static int dsi_display_phy_power_on(struct dsi_display *display)
int i;
struct dsi_display_ctrl *ctrl;
+ /* early return for splash enabled case */
+ if (display->cont_splash_enabled) {
+ pr_debug("skip phy power on\n");
+ return rc;
+ }
+
/* Sequence does not matter for split dsi usecases */
for (i = 0; i < display->ctrl_count; i++) {
@@ -287,6 +298,12 @@ static int dsi_display_ctrl_core_clk_on(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ /* early return for splash enabled case */
+ if (display->cont_splash_enabled) {
+ pr_debug("skip core clk on calling\n");
+ return rc;
+ }
+
/*
* In case of split DSI usecases, the clock for master controller should
* be enabled before the other controller. Master controller in the
@@ -329,6 +346,12 @@ static int dsi_display_ctrl_link_clk_on(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ /* early return for splash enabled case */
+ if (display->cont_splash_enabled) {
+ pr_debug("skip ctrl link clk on calling\n");
+ return rc;
+ }
+
/*
* In case of split DSI usecases, the clock for master controller should
* be enabled before the other controller. Master controller in the
@@ -460,7 +483,8 @@ static int dsi_display_ctrl_init(struct dsi_display *display)
for (i = 0 ; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
- rc = dsi_ctrl_host_init(ctrl->ctrl);
+ rc = dsi_ctrl_host_init(ctrl->ctrl,
+ display->cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to init host_%d, rc=%d\n",
display->name, i, rc);
@@ -720,7 +744,7 @@ static int dsi_display_phy_enable(struct dsi_display *display)
rc = dsi_phy_enable(m_ctrl->phy,
&display->config,
m_src,
- true);
+ true, display->cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
display->name, rc);
@@ -735,7 +759,7 @@ static int dsi_display_phy_enable(struct dsi_display *display)
rc = dsi_phy_enable(ctrl->phy,
&display->config,
DSI_PLL_SOURCE_NON_NATIVE,
- true);
+ true, display->cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
display->name, rc);
@@ -848,6 +872,11 @@ static int dsi_display_phy_sw_reset(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ if (display->cont_splash_enabled) {
+ pr_debug("skip phy sw reset\n");
+ return 0;
+ }
+
m_ctrl = &display->ctrl[display->cmd_master_idx];
rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
@@ -1481,8 +1510,8 @@ static int dsi_display_dfps_update(struct dsi_display *display,
m_ctrl = &display->ctrl[display->clk_master_idx];
rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
if (rc) {
- pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
- display->name, i, rc);
+ pr_err("[%s] failed to dfps update clock master, rc=%d\n",
+ display->name, rc);
goto error;
}
@@ -1748,6 +1777,45 @@ static int _dsi_display_dev_deinit(struct dsi_display *display)
return rc;
}
+/*
+ * _dsi_display_config_ctrl_for_splash
+ *
+ * Configure the ctrl engine for the DSI display.
+ * @display: Handle to the display
+ * Returns: Zero on success
+ */
+static int _dsi_display_config_ctrl_for_splash(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+ rc = dsi_display_vid_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable video engine, rc=%d\n",
+ display->name, rc);
+ goto error_out;
+ }
+ } else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+ rc = dsi_display_cmd_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error_out;
+ }
+ } else {
+ pr_err("[%s] Invalid configuration\n", display->name);
+ rc = -EINVAL;
+ }
+
+error_out:
+ return rc;
+}
+
/**
* dsi_display_bind - bind dsi device with controlling device
* @dev: Pointer to base of platform device
@@ -2141,6 +2209,8 @@ int dsi_display_drm_bridge_init(struct dsi_display *display,
init_data.num_of_input_lanes = num_of_lanes;
init_data.precede_bridge = precede_bridge;
init_data.panel_count = display->panel_count;
+ init_data.cont_splash_enabled =
+ display->cont_splash_enabled;
dba_bridge = dba_bridge_init(display->drm_dev, enc,
&init_data);
if (IS_ERR_OR_NULL(dba_bridge)) {
@@ -2451,26 +2521,28 @@ int dsi_display_prepare(struct dsi_display *display)
mutex_lock(&display->display_lock);
- for (i = 0; i < display->panel_count; i++) {
- rc = dsi_panel_pre_prepare(display->panel[i]);
- if (rc) {
- SDE_ERROR("[%s] panel pre-prepare failed, rc=%d\n",
- display->name, rc);
- goto error_panel_post_unprep;
+ if (!display->cont_splash_enabled) {
+ for (i = 0; i < display->panel_count; i++) {
+ rc = dsi_panel_pre_prepare(display->panel[i]);
+ if (rc) {
+ SDE_ERROR("[%s]pre-prepare failed, rc=%d\n",
+ display->name, rc);
+ goto error_panel_post_unprep;
+ }
}
}
rc = dsi_display_ctrl_power_on(display);
if (rc) {
pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_panel_post_unprep;
}
rc = dsi_display_phy_power_on(display);
if (rc) {
pr_err("[%s] failed to power on dsi phy, rc = %d\n",
- display->name, rc);
+ display->name, rc);
goto error_ctrl_pwr_off;
}
@@ -2497,21 +2569,21 @@ int dsi_display_prepare(struct dsi_display *display)
rc = dsi_display_ctrl_init(display);
if (rc) {
pr_err("[%s] failed to setup DSI controller, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_phy_disable;
}
rc = dsi_display_ctrl_link_clk_on(display);
if (rc) {
pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_ctrl_deinit;
}
rc = dsi_display_ctrl_host_enable(display);
if (rc) {
pr_err("[%s] failed to enable DSI host, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_ctrl_link_off;
}
@@ -2519,11 +2591,10 @@ int dsi_display_prepare(struct dsi_display *display)
rc = dsi_panel_prepare(display->panel[j]);
if (rc) {
SDE_ERROR("[%s] panel prepare failed, rc=%d\n",
- display->name, rc);
+ display->name, rc);
goto error_panel_unprep;
}
}
-
goto error;
error_panel_unprep:
@@ -2559,6 +2630,12 @@ int dsi_display_enable(struct dsi_display *display)
return -EINVAL;
}
+ if (display->cont_splash_enabled) {
+ _dsi_display_config_ctrl_for_splash(display);
+ display->cont_splash_enabled = false;
+ return 0;
+ }
+
mutex_lock(&display->display_lock);
for (i = 0; i < display->panel_count; i++) {
@@ -2755,6 +2832,30 @@ int dsi_display_unprepare(struct dsi_display *display)
return rc;
}
+int dsi_dsiplay_setup_splash_resource(struct dsi_display *display)
+{
+ int ret = 0, i = 0;
+ struct dsi_display_ctrl *ctrl;
+
+ if (!display)
+ return -EINVAL;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl)
+ return -EINVAL;
+
+ ret = dsi_ctrl_set_power_state(ctrl->ctrl,
+ DSI_CTRL_POWER_LINK_CLK_ON);
+ if (ret) {
+ SDE_ERROR("calling dsi_ctrl_set_power_state failed\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int __init dsi_display_register(void)
{
dsi_phy_drv_register();
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 210b8d00850b..3723f19fd0e7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -158,6 +158,8 @@ struct dsi_display {
/* DEBUG FS */
struct dentry *root;
+
+ bool cont_splash_enabled;
};
int dsi_display_dev_probe(struct platform_device *pdev);
@@ -338,4 +340,15 @@ int dsi_display_clock_gate(struct dsi_display *display, bool enable);
int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
int dsi_display_set_backlight(void *display, u32 bl_lvl);
+
+/**
+ * dsi_dsiplay_setup_splash_resource() - set up continuous splash resources
+ * @display: Handle to display.
+ *
+ * Set up the DSI splash resources to avoid a reset and visual glitch when
+ * DSI has already been enabled in the bootloader.
+ *
+ * Return: error code.
+ */
+int dsi_dsiplay_setup_splash_resource(struct dsi_display *display);
#endif /* _DSI_DISPLAY_H_ */
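For illustration only, a minimal sketch of how the new helper might be called during continuous-splash handoff; the wrapper function name is an assumption, and only dsi_dsiplay_setup_splash_resource() and cont_splash_enabled come from this patch:

/* Hypothetical handoff step: keep the bootloader's splash frame alive by
 * voting the DSI link clocks on before the first real kickoff. */
static int example_dsi_splash_handoff(struct dsi_display *display)
{
	int rc;

	if (!display || !display->cont_splash_enabled)
		return 0;

	rc = dsi_dsiplay_setup_splash_resource(display);
	if (rc)
		pr_err("[%s] splash resource setup failed, rc=%d\n",
		       display->name, rc);

	return rc;
}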
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 309401eb3093..35000d7eb12a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -439,7 +439,7 @@ int dsi_connector_get_modes(struct drm_connector *connector,
rc = dsi_display_get_modes(display, NULL, &count);
if (rc) {
pr_err("failed to get num of modes, rc=%d\n", rc);
- goto error;
+ goto end;
}
size = count * sizeof(*modes);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 1ccbbe7df573..da3b3b548e5f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -721,9 +721,10 @@ error:
* Return: error code.
*/
int dsi_phy_enable(struct msm_dsi_phy *phy,
- struct dsi_host_config *config,
- enum dsi_phy_pll_source pll_source,
- bool skip_validation)
+ struct dsi_host_config *config,
+ enum dsi_phy_pll_source pll_source,
+ bool skip_validation,
+ bool cont_splash_enabled)
{
int rc = 0;
@@ -758,7 +759,8 @@ int dsi_phy_enable(struct msm_dsi_phy *phy,
goto error_disable_clks;
}
- dsi_phy_enable_hw(phy);
+ if (!cont_splash_enabled)
+ dsi_phy_enable_hw(phy);
error_disable_clks:
rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 6c31bfa3ea00..aa21d0b347e8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -157,9 +157,10 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
* Return: error code.
*/
int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
- struct dsi_host_config *config,
- enum dsi_phy_pll_source pll_source,
- bool skip_validation);
+ struct dsi_host_config *config,
+ enum dsi_phy_pll_source pll_source,
+ bool skip_validation,
+ bool cont_splash_enabled);
/**
* dsi_phy_disable() - disable DSI PHY hardware.
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 6edcd6f57e70..f9bed1058f38 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -193,6 +193,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_bridge *ext_bridge;
int ret, i;
+ if (!msm_dsi)
+ return -EINVAL;
+
if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
!encoders[MSM_DSI_CMD_ENCODER_ID]))
return -EINVAL;
@@ -246,19 +249,17 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return 0;
fail:
- if (msm_dsi) {
- /* bridge/connector are normally destroyed by drm: */
- if (msm_dsi->bridge) {
- msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
- msm_dsi->bridge = NULL;
- }
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ /* don't destroy connector if we didn't make it */
+ if (msm_dsi->connector && !msm_dsi->external_bridge)
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
- msm_dsi->connector = NULL;
- }
+ msm_dsi->connector = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index e2b8deda46c2..845e4ad7b464 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -898,7 +898,7 @@ static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
data = msm_gem_vaddr(tx_gem);
- if (IS_ERR(data)) {
+ if (IS_ERR_OR_NULL(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
return ret;
@@ -1006,7 +1006,7 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
u32 *lp, *temp, data;
int i, j = 0, cnt;
u32 read_cnt;
- u8 reg[16];
+ u8 reg[16] = {0};
int repeated_bytes = 0;
int buf_offset = buf - msm_host->rx_buf;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 2091b748abbb..5d20e17d97d2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -618,16 +618,26 @@ fail:
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_device *dev = msm_dsi->dev;
+ struct drm_device *dev;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
struct drm_connector *connector;
struct list_head *connector_list;
+ if (!msm_dsi)
+ return ERR_PTR(-EINVAL);
+
+ dev = msm_dsi->dev;
+
int_bridge = msm_dsi->bridge;
ext_bridge = msm_dsi->external_bridge =
msm_dsi_host_get_bridge(msm_dsi->host);
+ if (!int_bridge || !ext_bridge) {
+ pr_err("%s: failed to get bridge info\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
/*
* HACK: we may not know the external DSI bridge device's mode
* flags here. We'll get to know them only when the device
@@ -797,7 +807,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
int id = msm_dsi->id;
int ret;
- if (id > DSI_MAX) {
+ if (id >= DSI_MAX) {
pr_err("%s: invalid id %d\n", __func__, id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index 3c149f191871..46cc521a09f3 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -1215,6 +1215,9 @@ static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on)
gpio_free(config->hpd_gpio);
+ if (config->hpd5v_gpio != -1)
+ gpio_free(config->hpd5v_gpio);
+
if (config->mux_en_gpio != -1) {
gpio_set_value_cansleep(config->mux_en_gpio, 0);
gpio_free(config->mux_en_gpio);
@@ -1308,7 +1311,7 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
}
}
- if (!sde_kms->splash_info.handoff) {
+ if (!sde_hdmi->cont_splash_enabled) {
sde_hdmi_set_mode(hdmi, false);
_sde_hdmi_phy_reset(hdmi);
sde_hdmi_set_mode(hdmi, true);
@@ -1336,6 +1339,9 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ if (!sde_hdmi->non_pluggable)
+ hdmi->hpd_off = false;
+ SDE_DEBUG("enabled hdmi hpd\n");
return 0;
fail:
@@ -1347,30 +1353,28 @@ int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi)
struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
- int i, ret;
- struct drm_connector *connector;
- struct msm_drm_private *priv;
- struct sde_kms *sde_kms;
-
- connector = hdmi->connector;
- priv = connector->dev->dev_private;
- sde_kms = to_sde_kms(priv->kms);
+ int i, ret = 0;
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
+ goto err_regulator_enable;
}
}
ret = pinctrl_pm_select_default_state(dev);
- if (ret)
+ if (ret) {
SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+ goto err_pinctrl_state;
+ }
ret = _sde_hdmi_gpio_config(hdmi, true);
- if (ret)
+ if (ret) {
SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+ goto err_gpio_config;
+ }
for (i = 0; i < config->hpd_clk_cnt; i++) {
if (config->hpd_freq && config->hpd_freq[i]) {
@@ -1385,17 +1389,27 @@ int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi)
if (ret) {
SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
+ goto err_clk_prepare_enable;
}
}
sde_hdmi_set_mode(hdmi, true);
+ goto exit;
- /* Wait for vsync */
- msleep(20);
-
+err_clk_prepare_enable:
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+err_gpio_config:
+ _sde_hdmi_gpio_config(hdmi, false);
+err_pinctrl_state:
+ pinctrl_pm_select_sleep_state(dev);
+err_regulator_enable:
+ for (i = 0; i < config->hpd_reg_cnt; i++)
+ regulator_disable(hdmi->hpd_regs[i]);
+exit:
return ret;
}
-static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
+static void _sde_hdmi_hpd_disable(struct sde_hdmi *sde_hdmi)
{
struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
const struct hdmi_platform_config *config = hdmi->config;
@@ -1403,6 +1417,11 @@ static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
int i, ret = 0;
unsigned long flags;
+ if (!sde_hdmi->non_pluggable && hdmi->hpd_off) {
+ pr_warn("hdmi display hpd was already disabled\n");
+ return;
+ }
+
spin_lock_irqsave(&hdmi->reg_lock, flags);
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_CTRL, 0);
@@ -1429,12 +1448,44 @@ static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
pr_warn("failed to disable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
}
+
+ if (!sde_hdmi->non_pluggable)
+ hdmi->hpd_off = true;
+ SDE_DEBUG("disabled hdmi hpd\n");
+}
+
+/**
+ * _sde_hdmi_update_hpd_state() - Update the HDMI HPD clock state
+ *
+ * @state: non-zero to disable the HPD clock, 0 to enable it.
+ * Return: 0 on success, non-zero in case of failure.
+ *
+ */
+static int
+_sde_hdmi_update_hpd_state(struct sde_hdmi *hdmi_display, u64 state)
+{
+ struct hdmi *hdmi = hdmi_display->ctrl.ctrl;
+ int rc = 0;
+
+ if (hdmi_display->non_pluggable)
+ return 0;
+
+ SDE_DEBUG("changing hdmi hpd state to %llu\n", state);
+
+ if (state == SDE_MODE_HPD_ON) {
+ if (!hdmi->hpd_off)
+ pr_warn("hdmi display hpd was already enabled\n");
+ rc = _sde_hdmi_hpd_enable(hdmi_display);
+ } else {
+ _sde_hdmi_hpd_disable(hdmi_display);
+ }
+
+ return rc;
}
void sde_hdmi_core_disable(struct sde_hdmi *sde_hdmi)
{
/* HPD contains all the core clock and pwr */
- _sde_hdmi_hdp_disable(sde_hdmi);
+ _sde_hdmi_hpd_disable(sde_hdmi);
}
static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display)
@@ -1664,7 +1715,7 @@ static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
const char *phandle = "qcom,msm_ext_disp";
if (!display) {
- SDE_ERROR("[%s]Invalid params\n", display->name);
+ SDE_ERROR("Invalid params\n");
return -EINVAL;
}
@@ -2204,6 +2255,8 @@ int sde_hdmi_set_property(struct drm_connector *connector,
rc = _sde_hdmi_enable_pll_update(display, value);
else if (property_index == CONNECTOR_PROP_PLL_DELTA)
rc = _sde_hdmi_update_pll_delta(display, value);
+ else if (property_index == CONNECTOR_PROP_HPD_OFF)
+ rc = _sde_hdmi_update_hpd_state(display, value);
return rc;
}
@@ -2281,7 +2334,7 @@ int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
return -EINVAL;
}
- _sde_hdmi_hdp_disable(sde_hdmi);
+ _sde_hdmi_hpd_disable(sde_hdmi);
return 0;
}
@@ -3148,7 +3201,6 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
struct msm_drm_private *priv = NULL;
struct hdmi *hdmi;
struct platform_device *pdev;
- struct sde_kms *sde_kms;
DBG("");
if (!display || !display->drm_dev || !enc) {
@@ -3174,7 +3226,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
- hdmi->bridge = sde_hdmi_bridge_init(hdmi);
+ hdmi->bridge = sde_hdmi_bridge_init(hdmi, display);
if (IS_ERR(hdmi->bridge)) {
rc = PTR_ERR(hdmi->bridge);
SDE_ERROR("failed to create HDMI bridge: %d\n", rc);
@@ -3214,12 +3266,9 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
* clocks. This can skip the clock disabling operation in
* clock_late_init when finding clk.count == 1.
*/
- sde_kms = to_sde_kms(priv->kms);
- if (sde_kms->splash_info.handoff) {
+ if (display->cont_splash_enabled) {
sde_hdmi_bridge_power_on(hdmi->bridge);
hdmi->power_on = true;
- } else {
- hdmi->power_on = false;
}
mutex_unlock(&display->display_lock);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 471472ea23cf..9cf807e829c7 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -196,6 +196,8 @@ struct sde_hdmi {
struct dss_io_data io[HDMI_TX_MAX_IO];
/* DEBUG FS */
struct dentry *root;
+
+ bool cont_splash_enabled;
};
/**
@@ -431,10 +433,12 @@ int sde_hdmi_get_property(struct drm_connector *connector,
/**
* sde_hdmi_bridge_init() - init sde hdmi bridge
* @hdmi: Handle to the hdmi.
+ * @display: Handle to the sde_hdmi display.
*
* Return: struct drm_bridge *.
*/
-struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi);
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi,
+ struct sde_hdmi *display);
/**
* sde_hdmi_set_mode() - Set HDMI mode API.
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 5a4c4b02c340..bae6b1c84420 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -90,6 +90,7 @@ static inline uint32_t SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
struct sde_hdmi_bridge {
struct drm_bridge base;
struct hdmi *hdmi;
+ struct sde_hdmi *display;
};
#define to_hdmi_bridge(x) container_of(x, struct sde_hdmi_bridge, base)
@@ -124,24 +125,19 @@ static void sde_hdmi_clear_hdr_info(struct drm_bridge *bridge)
connector->hdr_supported = false;
}
-static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+static int _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = NULL;
- int i, ret;
-
- if (c_conn)
- display = (struct sde_hdmi *)c_conn->display;
-
- if (display) {
- if (display->non_pluggable) {
- ret = sde_hdmi_core_enable(display);
- if (ret)
- SDE_ERROR("failed to enable HDMI core (%d)\n",
- ret);
+ int i, ret = 0;
+ struct sde_hdmi *display = sde_hdmi_bridge->display;
+
+ if (display->non_pluggable && !hdmi->power_on) {
+ ret = sde_hdmi_core_enable(display);
+ if (ret) {
+ SDE_ERROR("failed to enable HDMI core (%d)\n", ret);
+ goto err_core_enable;
}
}
@@ -150,15 +146,17 @@ static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
if (ret) {
SDE_ERROR("failed to enable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
+ goto err_regulator_enable;
}
}
- if (config->pwr_clk_cnt > 0) {
+ if (config->pwr_clk_cnt > 0 && hdmi->pixclock) {
DRM_DEBUG("pixclock: %lu", hdmi->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
- SDE_ERROR("failed to set pixel clk: %s (%d)\n",
- config->pwr_clk_names[0], ret);
+ pr_warn("failed to set pixclock: %s %ld (%d)\n",
+ config->pwr_clk_names[0],
+ hdmi->pixclock, ret);
}
}
@@ -167,18 +165,31 @@ static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
if (ret) {
SDE_ERROR("failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
+ goto err_prepare_enable;
}
}
+ goto exit;
+
+err_prepare_enable:
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+err_regulator_enable:
+ for (i = 0; i < config->pwr_reg_cnt; i++)
+ regulator_disable(hdmi->pwr_regs[i]);
+err_core_enable:
+ if (display->non_pluggable)
+ sde_hdmi_core_disable(display);
+exit:
+ return ret;
}
-static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
+static int _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
- int i, ret;
+ struct sde_hdmi *display = sde_hdmi_bridge->display;
+ int i, ret = 0;
/* Wait for vsync */
msleep(20);
@@ -188,15 +199,15 @@ static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_disable(hdmi->pwr_regs[i]);
- if (ret) {
+ if (ret)
SDE_ERROR("failed to disable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
- }
}
- if (display->non_pluggable) {
+ if (display->non_pluggable)
sde_hdmi_core_disable(display);
- }
+
+ return ret;
}
static int _sde_hdmi_bridge_ddc_clear_irq(struct hdmi *hdmi,
@@ -490,16 +501,20 @@ static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ struct sde_hdmi *display = sde_hdmi_bridge->display;
DRM_DEBUG("power up");
if (!hdmi->power_on) {
- _sde_hdmi_bridge_power_on(bridge);
+ if (_sde_hdmi_bridge_power_on(bridge)) {
+ DEV_ERR("failed to power on bridge\n");
+ return;
+ }
hdmi->power_on = true;
}
+ _sde_hdmi_bridge_setup_scrambler(hdmi, &display->mode);
+
if (phy)
phy->funcs->powerup(phy, hdmi->pixclock);
@@ -572,8 +587,7 @@ static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ struct sde_hdmi *display = sde_hdmi_bridge->display;
/* need to update hdcp info here to ensure right HDCP support*/
sde_hdmi_update_hdcp_info(hdmi->connector);
@@ -588,9 +602,7 @@ static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ struct sde_hdmi *display = sde_hdmi_bridge->display;
mutex_lock(&display->display_lock);
@@ -610,8 +622,7 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
- struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ struct sde_hdmi *display = sde_hdmi_bridge->display;
sde_hdmi_notify_clients(display, display->connected);
@@ -835,20 +846,13 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
- struct drm_connector *connector = hdmi->connector;
- struct sde_connector *c_conn = to_sde_connector(connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ struct sde_hdmi *display = sde_hdmi_bridge->display;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
u32 div = 0;
mode = adjusted_mode;
- if (display->non_pluggable && !hdmi->power_on) {
- if (sde_hdmi_core_enable(display))
- pr_err("mode set enable core failured\n");
- }
-
display->dc_enable = mode->private_flags &
(MSM_MODE_FLAG_RGB444_DC_ENABLE |
MSM_MODE_FLAG_YUV420_DC_ENABLE);
@@ -923,11 +927,7 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
}
_sde_hdmi_save_mode(hdmi, mode);
- _sde_hdmi_bridge_setup_scrambler(hdmi, mode);
_sde_hdmi_bridge_setup_deep_color(hdmi);
- if (display->non_pluggable && !hdmi->power_on) {
- sde_hdmi_core_disable(display);
- }
}
static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -961,7 +961,8 @@ static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
/* initialize bridge */
-struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi)
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi,
+ struct sde_hdmi *display)
{
struct drm_bridge *bridge = NULL;
struct sde_hdmi_bridge *sde_hdmi_bridge;
@@ -975,6 +976,7 @@ struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi)
}
sde_hdmi_bridge->hdmi = hdmi;
+ sde_hdmi_bridge->display = display;
bridge = &sde_hdmi_bridge->base;
bridge->funcs = &_sde_hdmi_bridge_funcs;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 8ca7b36ee0c8..9a0733bf81ff 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -50,6 +50,9 @@ struct hdmi {
const struct hdmi_platform_config *config;
+ /* hpd state: */
+ bool hpd_off;
+
/* audio state: */
struct hdmi_audio audio;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 422a9a78f3b2..65e085fd2b6a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -242,7 +242,7 @@ static void blend_setup(struct drm_crtc *crtc)
/* The reset for blending */
for (i = STAGE0; i <= STAGE_MAX; i++) {
- if (!pstates[i])
+ if (!pstates[i] || !pstates[i]->base.fb)
continue;
format = to_mdp_format(
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 873ab11d34d2..d751625bbfd7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2015, 2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -193,7 +193,8 @@ static void mdp5_plane_reset(struct drm_plane *plane)
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
-
+ if (!mdp5_state)
+ return;
/* assign default blend parameters */
mdp5_state->alpha = 255;
mdp5_state->premultiplied = 0;
@@ -218,8 +219,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return NULL;
- if (mdp5_state && mdp5_state->base.fb)
+ if (mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
mdp5_state->mode_changed = false;
@@ -684,14 +687,21 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
bool vflip, hflip;
unsigned long flags;
int ret;
+ const struct msm_format *msm_fmt;
+ msm_fmt = msm_framebuffer_format(fb);
nplanes = drm_format_num_planes(fb->pixel_format);
/* bad formats should already be rejected: */
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;
- format = to_mdp_format(msm_framebuffer_format(fb));
+ if (!msm_fmt) {
+ pr_err("invalid format");
+ return -EINVAL;
+ }
+
+ format = to_mdp_format(msm_fmt);
pix_format = format->base.pixel_format;
/* src values are in Q16 fixed point, convert to integer: */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 0c119ec5d97c..7d40f38092d4 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -562,6 +562,11 @@ int msm_atomic_commit(struct drm_device *dev,
struct msm_commit *commit;
int i, ret;
+ if (!priv || priv->shutdown_in_progress) {
+ DRM_ERROR("priv is null or shutdwon is in-progress\n");
+ return -EINVAL;
+ }
+
SDE_ATRACE_BEGIN("atomic_commit");
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6f968e93d959..b57663013dcb 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -439,6 +439,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
struct msm_kms *kms;
struct sde_dbg_power_ctrl dbg_power_ctrl = { NULL };
int ret, i;
+ struct sched_param param;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
@@ -532,7 +533,12 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
goto fail;
}
}
-
+ /*
+ * This priority was found during empirical testing to provide
+ * appropriate realtime scheduling for processing display updates
+ * while interacting with other realtime and normal priority tasks.
+ */
+ param.sched_priority = 16;
/* initialize commit thread structure */
for (i = 0; i < priv->num_crtcs; i++) {
priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
@@ -543,6 +549,11 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
&priv->disp_thread[i].worker,
"crtc_commit:%d",
priv->disp_thread[i].crtc_id);
+ if (!IS_ERR(priv->disp_thread[i].thread)) {
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display thread priority update failed: %d\n",
+ ret);
+ }
if (IS_ERR(priv->disp_thread[i].thread)) {
dev_err(dev->dev, "failed to create kthread\n");
@@ -2199,6 +2210,28 @@ static const struct platform_device_id msm_id[] = {
{ }
};
+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = NULL;
+
+ if (!ddev) {
+ DRM_ERROR("invalid drm device node\n");
+ return;
+ }
+
+ priv = ddev->dev_private;
+ if (!priv) {
+ DRM_ERROR("invalid msm drm private node\n");
+ return;
+ }
+
+ msm_lastclose(ddev);
+
+ /* set this after lastclose to allow kickoff from lastclose */
+ priv->shutdown_in_progress = true;
+}
+
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp" }, /* mdp4 */
{ .compatible = "qcom,sde-kms" }, /* sde */
@@ -2209,6 +2242,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
+ .shutdown = msm_pdev_shutdown,
.driver = {
.name = "msm_drm",
.of_match_table = dt_match,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 25dc5f9ef561..e0ac0582e791 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -164,6 +164,7 @@ enum msm_mdp_conn_property {
CONNECTOR_PROP_TOPOLOGY_NAME,
CONNECTOR_PROP_TOPOLOGY_CONTROL,
CONNECTOR_PROP_LP,
+ CONNECTOR_PROP_HPD_OFF,
/* total # of properties */
CONNECTOR_PROP_COUNT
@@ -374,6 +375,9 @@ struct msm_drm_private {
/* list of clients waiting for events */
struct list_head client_event_list;
+
+ /* update the flag when msm driver receives shutdown notification */
+ bool shutdown_in_progress;
};
struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index a3f0392c2f88..d222fdd69a57 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -33,15 +33,31 @@ static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+
return drm_gem_handle_create(file_priv,
msm_fb->planes[0], handle);
}
static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
@@ -72,9 +88,16 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
fb->width, fb->height, (char *)&fb->pixel_format,
fb->refcount.refcount.counter, fb->base.id);
@@ -95,10 +118,17 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int ret, i, n;
uint64_t iova;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
@@ -112,8 +142,16 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++)
msm_gem_put_iova(msm_fb->planes[i], aspace);
@@ -123,9 +161,15 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
uint64_t iova;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
@@ -137,7 +181,14 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return ERR_PTR(-EINVAL);
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
return msm_fb->planes[plane];
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 2e528b112e1f..af36b95beadb 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -347,6 +347,10 @@ static int submit_reloc(struct msm_gpu *gpu,
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
ptr = msm_gem_vaddr(&obj->base);
+ if (!ptr) {
+ DRM_ERROR("Invalid format");
+ return -EINVAL;
+ }
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index b52c4752c5fe..4586b62401fb 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -237,7 +237,8 @@ static struct device *find_context_bank(const char *name)
/* Get the parent device */
parent = of_find_device_by_node(node->parent);
-
+ if (!parent)
+ return ERR_PTR(-ENODEV);
/* Populate the sub nodes */
of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index cd3a710f8f27..74dea95d90de 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -46,6 +46,10 @@ struct msm_mmu_funcs {
void (*destroy)(struct msm_mmu *mmu);
void (*enable)(struct msm_mmu *mmu);
void (*disable)(struct msm_mmu *mmu);
+ int (*early_splash_map)(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, u32 flags);
+ void (*early_splash_unmap)(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt);
int (*set_property)(struct msm_mmu *mmu,
enum iommu_attr attr, void *data);
};
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index 10f89de25831..02ed2b7a062f 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -340,9 +340,16 @@ void msm_property_install_enum(struct msm_property_info *info,
info->property_data[property_idx].default_value = default_value;
info->property_data[property_idx].force_dirty = false;
+ /* select first defined value for enums */
+ if (!is_bitmask)
+ info->property_data[property_idx].default_value =
+ values->type;
+
/* always attach property, if created */
if (*prop) {
- drm_object_attach_property(info->base, *prop, 0);
+ drm_object_attach_property(info->base, *prop,
+ info->property_data
+ [property_idx].default_value);
++info->install_count;
}
}
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 4247243055b6..aefbe0988fe5 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -120,30 +120,19 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- struct iommu_domain *domain;
int ret;
if (!client || !sgt)
return -EINVAL;
- if (iova != 0) {
- if (!client->mmu_mapping || !client->mmu_mapping->domain)
- return -EINVAL;
-
- domain = client->mmu_mapping->domain;
-
- return iommu_map_sg(domain, iova, sgt->sgl,
- sgt->nents, flags);
- } else {
- if (priv)
- ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl,
- sgt->nents, DMA_BIDIRECTIONAL, priv);
- else
- ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
- DMA_BIDIRECTIONAL);
+ if (priv)
+ ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL, priv);
+ else
+ ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL);
- return (ret != sgt->nents) ? -ENOMEM : 0;
- }
+ return (ret != sgt->nents) ? -ENOMEM : 0;
}
static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
@@ -160,6 +149,47 @@ static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
DMA_BIDIRECTIONAL);
}
+static int msm_smmu_early_splash_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, u32 flags)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+
+ if (!client || !sgt)
+ return -EINVAL;
+
+ if (!client->mmu_mapping || !client->mmu_mapping->domain)
+ return -EINVAL;
+
+ domain = client->mmu_mapping->domain;
+
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, flags);
+}
+
+static void msm_smmu_early_splash_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ struct scatterlist *sg;
+ size_t len = 0;
+ int unmapped, i = 0;
+
+ if (!client || !client->mmu_mapping || !client->mmu_mapping->domain)
+ return;
+
+ domain = client->mmu_mapping->domain;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ len += sg->length;
+
+ unmapped = iommu_unmap(domain, iova, len);
+ if (unmapped < len)
+ DRM_ERROR("could not unmap iova@%llx\n", iova);
+}
+
static void msm_smmu_destroy(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
@@ -199,6 +229,8 @@ static const struct msm_mmu_funcs funcs = {
.map = msm_smmu_map,
.unmap = msm_smmu_unmap,
.destroy = msm_smmu_destroy,
+ .early_splash_map = msm_smmu_early_splash_map,
+ .early_splash_unmap = msm_smmu_early_splash_unmap,
.set_property = msm_smmu_set_property,
};
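A hedged sketch of how a caller might exercise the new early-splash MMU ops; the wrapper names, IOVA and prot flags are assumptions for illustration, and only the ops themselves are added by this patch:

#include <linux/iommu.h>
#include "msm_mmu.h"

/* Hypothetical: map the bootloader's splash framebuffer at the IOVA it is
 * already being scanned out from; early_splash_map() follows iommu_map_sg()
 * semantics, returning the mapped size (<= 0 means failure). */
static int example_splash_map(struct msm_mmu *mmu, uint64_t splash_iova,
			      struct sg_table *sgt)
{
	int mapped;

	if (!mmu->funcs->early_splash_map)
		return -EOPNOTSUPP;

	mapped = mmu->funcs->early_splash_map(mmu, splash_iova, sgt,
					      IOMMU_READ | IOMMU_WRITE);
	return (mapped <= 0) ? -ENOMEM : 0;
}

/* Hypothetical: tear the mapping down once splash handoff is complete. */
static void example_splash_unmap(struct msm_mmu *mmu, uint64_t splash_iova,
				 struct sg_table *sgt)
{
	if (mmu->funcs->early_splash_unmap)
		mmu->funcs->early_splash_unmap(mmu, splash_iova, sgt);
}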
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6a741a7ce0f6..1bc3d0a926eb 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,11 @@ static const struct drm_prop_enum_list e_power_mode[] = {
{SDE_MODE_DPMS_OFF, "OFF"},
};
+static const struct drm_prop_enum_list hpd_clock_state[] = {
+ {SDE_MODE_HPD_ON, "ON"},
+ {SDE_MODE_HPD_OFF, "OFF"},
+};
+
int sde_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info)
{
@@ -475,6 +480,9 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
_sde_connector_update_power_locked(c_conn);
mutex_unlock(&c_conn->lock);
break;
+ case CONNECTOR_PROP_HPD_OFF:
+ c_conn->hpd_mode = val;
+ break;
default:
break;
}
@@ -565,7 +573,6 @@ void sde_connector_complete_commit(struct drm_connector *connector)
struct drm_device *dev;
struct msm_drm_private *priv;
struct sde_connector *c_conn;
- struct sde_kms *sde_kms;
if (!connector) {
SDE_ERROR("invalid connector\n");
@@ -574,21 +581,20 @@ void sde_connector_complete_commit(struct drm_connector *connector)
dev = connector->dev;
priv = dev->dev_private;
- sde_kms = to_sde_kms(priv->kms);
/* signal connector's retire fence */
sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0);
- /* after first vsync comes,
- * early splash resource should start to be released.
+ /*
+ * Once LK has completely exited, its early splash resources
+ * should be released.
*/
- if (sde_splash_get_lk_complete_status(&sde_kms->splash_info)) {
+ if (sde_splash_get_lk_complete_status(priv->kms)) {
c_conn = to_sde_connector(connector);
- sde_splash_clean_up_free_resource(priv->kms,
- &priv->phandle,
- c_conn->connector_type,
- c_conn->display);
+ sde_splash_free_resource(priv->kms, &priv->phandle,
+ c_conn->connector_type,
+ c_conn->display);
}
}
@@ -819,6 +825,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
c_conn->display = display;
c_conn->dpms_mode = DRM_MODE_DPMS_ON;
+ c_conn->hpd_mode = SDE_MODE_HPD_ON;
c_conn->lp_mode = 0;
c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
@@ -946,6 +953,11 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
ARRAY_SIZE(e_power_mode),
CONNECTOR_PROP_LP, 0);
+ msm_property_install_enum(&c_conn->property_info, "HPD_OFF",
+ DRM_MODE_PROP_ATOMIC, 0, hpd_clock_state,
+ ARRAY_SIZE(hpd_clock_state),
+ CONNECTOR_PROP_HPD_OFF, 0);
+
rc = msm_property_install_get_status(&c_conn->property_info);
if (rc) {
SDE_ERROR("failed to create one or more properties\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 0f563ac25da8..7db98afad713 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,9 @@
#include "sde_kms.h"
#include "sde_fence.h"
+#define SDE_MODE_HPD_ON 0
+#define SDE_MODE_HPD_OFF 1
+
#define SDE_CONNECTOR_NAME_SIZE 16
struct sde_connector;
@@ -207,6 +210,7 @@ struct sde_connector {
struct sde_fence retire_fence;
struct sde_connector_ops ops;
int dpms_mode;
+ u64 hpd_mode;
int lp_mode;
int last_panel_power_mode;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 83c8982b2e00..4f7e688650de 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -31,23 +31,35 @@ static void sde_core_irq_callback_handler(void *arg, int irq_idx)
struct sde_irq *irq_obj = &sde_kms->irq_obj;
struct sde_irq_callback *cb;
unsigned long irq_flags;
+ bool cb_tbl_error = false;
+ int enable_counts = 0;
pr_debug("irq_idx=%d\n", irq_idx);
- if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
- SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+ /* print error outside lock */
+ cb_tbl_error = true;
+ enable_counts = atomic_read(
+ &sde_kms->irq_obj.enable_counts[irq_idx]);
+ }
atomic_inc(&irq_obj->irq_counts[irq_idx]);
/*
* Perform registered function callback
*/
- spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
if (cb->func)
cb->func(cb->arg, irq_idx);
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+ if (cb_tbl_error) {
+ SDE_ERROR("irq has no registered callback, idx %d enables %d\n",
+ irq_idx, enable_counts);
+ SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+ }
+
/*
* Clear pending interrupt status in HW.
* NOTE: sde_core_irq_callback_handler is protected by top-level
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 3f5aa4d276c9..6ad1ce16c20a 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -39,10 +39,10 @@
#include "sde_trace.h"
/* default input fence timeout, in ms */
-#define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000
+#define SDE_CRTC_INPUT_FENCE_TIMEOUT 10000
/*
- * The default input fence timeout is 2 seconds while max allowed
+ * The default input fence timeout is 10 seconds while max allowed
* range is 10 seconds. Any value above 10 seconds adds glitches beyond
* tolerance limit.
*/
@@ -240,6 +240,10 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
plane->state->fb->base.id : -1);
format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
+ if (!format) {
+ SDE_ERROR("%s: get sde format failed\n", __func__);
+ return;
+ }
/* blend config update */
if (pstate->stage != SDE_STAGE_BASE) {
@@ -300,6 +304,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
struct sde_crtc_mixer *mixer = sde_crtc->mixers;
struct sde_hw_ctl *ctl;
struct sde_hw_mixer *lm;
+ struct sde_splash_info *sinfo;
+ struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
int i;
@@ -310,6 +316,17 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
return;
}
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
+
+ sinfo = &sde_kms->splash_info;
+ if (!sinfo) {
+ SDE_ERROR("invalid splash info\n");
+ return;
+ }
+
for (i = 0; i < sde_crtc->num_mixers; i++) {
if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
SDE_ERROR("invalid lm or ctl assigned to mixer\n");
@@ -319,7 +336,10 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask = 0;
if (mixer[i].hw_ctl->ops.clear_all_blendstages)
mixer[i].hw_ctl->ops.clear_all_blendstages(
- mixer[i].hw_ctl);
+ mixer[i].hw_ctl,
+ sinfo->handoff,
+ sinfo->reserved_pipe_info,
+ MAX_BLOCKS);
}
/* initialize stage cfg */
@@ -346,7 +366,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &sde_crtc->stage_cfg, i);
+ &sde_crtc->stage_cfg, i,
+ sinfo->handoff, sinfo->reserved_pipe_info, MAX_BLOCKS);
}
}
@@ -915,6 +936,11 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
dev = crtc->dev;
sde_crtc = to_sde_crtc(crtc);
sde_kms = _sde_crtc_get_kms(crtc);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
+
priv = sde_kms->dev->dev_private;
/*
@@ -1432,43 +1458,67 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
goto end;
}
- /*
- * enforce pipe priority restrictions
+ /* validate source split:
* use pstates sorted by stage to check planes on same stage
* we assume that all pipes are in source split so its valid to compare
* without taking into account left/right mixer placement
*/
for (i = 1; i < cnt; i++) {
struct plane_state *prv_pstate, *cur_pstate;
- int32_t prv_x, cur_x, prv_id, cur_id;
+ struct sde_rect left_rect, right_rect;
+ int32_t left_pid, right_pid;
+ int32_t stage;
prv_pstate = &pstates[i - 1];
cur_pstate = &pstates[i];
if (prv_pstate->stage != cur_pstate->stage)
continue;
- prv_x = prv_pstate->drm_pstate->crtc_x;
- cur_x = cur_pstate->drm_pstate->crtc_x;
- prv_id = prv_pstate->sde_pstate->base.plane->base.id;
- cur_id = cur_pstate->sde_pstate->base.plane->base.id;
+ stage = cur_pstate->stage;
- /*
- * Planes are enumerated in pipe-priority order such that planes
- * with lower drm_id must be left-most in a shared blend-stage
- * when using source split.
+ left_pid = prv_pstate->sde_pstate->base.plane->base.id;
+ POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
+ prv_pstate->drm_pstate->crtc_y,
+ prv_pstate->drm_pstate->crtc_w,
+ prv_pstate->drm_pstate->crtc_h, false);
+
+ right_pid = cur_pstate->sde_pstate->base.plane->base.id;
+ POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
+ cur_pstate->drm_pstate->crtc_y,
+ cur_pstate->drm_pstate->crtc_w,
+ cur_pstate->drm_pstate->crtc_h, false);
+
+ if (right_rect.x < left_rect.x) {
+ swap(left_pid, right_pid);
+ swap(left_rect, right_rect);
+ }
+
+ /*
+ * - planes are enumerated in pipe-priority order such that
+ * planes with lower drm_id must be left-most in a shared
+ * blend-stage when using source split.
+ * - planes in source split must be contiguous in width
+ * - planes in source split must have same dest yoff and height
*/
- if (cur_x > prv_x && cur_id < prv_id) {
+ if (right_pid < left_pid) {
SDE_ERROR(
- "shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n",
- cur_pstate->stage, cur_id, cur_x,
- prv_id, prv_x);
+ "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+ stage, left_pid, right_pid);
rc = -EINVAL;
goto end;
- } else if (cur_x < prv_x && cur_id > prv_id) {
+ } else if (right_rect.x != (left_rect.x + left_rect.w)) {
SDE_ERROR(
- "shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n",
- cur_pstate->stage, prv_id, prv_x,
- cur_id, cur_x);
+ "non-contiguous coordinates for src split. stage: %d left: %d - %d right: %d - %d\n",
+ stage, left_rect.x, left_rect.w,
+ right_rect.x, right_rect.w);
+ rc = -EINVAL;
+ goto end;
+ } else if ((left_rect.y != right_rect.y) ||
+ (left_rect.h != right_rect.h)) {
+ SDE_ERROR(
+ "source split at stage: %d. invalid yoff/height: l_y: %d r_y: %d l_h: %d r_h: %d\n",
+ stage, left_rect.y, right_rect.y,
+ left_rect.h, right_rect.h);
rc = -EINVAL;
goto end;
}
@@ -1543,6 +1593,10 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
sde_crtc = to_sde_crtc(crtc);
dev = crtc->dev;
sde_kms = _sde_crtc_get_kms(crtc);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
if (!info) {
@@ -1588,8 +1642,18 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
sde_kms_info_add_keyint(info, "max_linewidth",
catalog->max_mixer_width);
- sde_kms_info_add_keyint(info, "max_blendstages",
- catalog->max_mixer_blendstages);
+
+ /*
+ * At this point we cannot know which display early RVC will run on.
+ * To avoid impacting the early RVC layer, reduce every LM's blend
+ * stage count; this is restored once handoff is done.
+ */
+ if (sde_kms->splash_info.handoff)
+ sde_kms_info_add_keyint(info, "max_blendstages",
+ catalog->max_mixer_blendstages - 1);
+ else
+ sde_kms_info_add_keyint(info, "max_blendstages",
+ catalog->max_mixer_blendstages);
+
if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
@@ -1683,7 +1747,6 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
int i, ret = -EINVAL;
- bool conn_offset = 0;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
@@ -1691,20 +1754,13 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
- for (i = 0; i < cstate->num_connectors; ++i) {
- conn_offset = sde_connector_needs_offset(
- cstate->connectors[i]);
- if (conn_offset)
- break;
- }
-
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
int offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
ret = sde_fence_create(&sde_crtc->output_fence, val,
- offset + conn_offset);
+ offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index cb8b349e72c7..fa17768d9939 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -43,6 +43,8 @@
/* timeout in frames waiting for frame done */
#define SDE_ENCODER_FRAME_DONE_TIMEOUT 60
+#define MISR_BUFF_SIZE 256
+
/*
* Two to anticipate panels that can do cmd/vid dynamic switching
* plan is to create all possible physical encoder types, and switch between
@@ -600,6 +602,12 @@ static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
SDE_ATRACE_BEGIN("encoder_underrun_callback");
atomic_inc(&phy_enc->underrun_cnt);
SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+
+ trace_sde_encoder_underrun(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ SDE_DBG_CTRL("stop_ftrace");
+ SDE_DBG_CTRL("panic_underrun");
+
SDE_ATRACE_END("encoder_underrun_callback");
}
@@ -1046,16 +1054,18 @@ static ssize_t _sde_encoder_misr_set(struct file *file,
struct sde_encoder_virt *sde_enc;
struct drm_encoder *drm_enc;
int i = 0;
- char buf[10];
+ char buf[MISR_BUFF_SIZE + 1];
+ size_t buff_copy;
u32 enable, frame_count;
drm_enc = file->private_data;
sde_enc = to_sde_encoder_virt(drm_enc);
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
+ buff_copy = min_t(size_t, MISR_BUFF_SIZE, count);
+ if (copy_from_user(buf, user_buf, buff_copy))
+ return -EFAULT;
- buf[count] = 0; /* end of string */
+ buf[buff_copy] = 0; /* end of string */
if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
return -EFAULT;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index d58c06de1684..2f89c571fcfc 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -333,10 +333,24 @@ static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
phys_enc);
}
+static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
+{
+ enum sde_rm_topology_name topology;
+
+ if (!phys_enc)
+ return false;
+
+ topology = sde_connector_get_topology_name(phys_enc->connector);
+ if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+ return true;
+
+ return false;
+}
+
static bool sde_encoder_phys_vid_needs_single_flush(
struct sde_encoder_phys *phys_enc)
{
- return phys_enc && phys_enc->split_role != ENC_ROLE_SOLO;
+ return phys_enc && _sde_encoder_phys_is_ppsplit(phys_enc);
}
static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
@@ -674,7 +688,7 @@ static int sde_encoder_phys_vid_wait_for_vblank(
KICKOFF_TIMEOUT_MS);
if (ret <= 0) {
irq_status = sde_core_irq_read(phys_enc->sde_kms,
- INTR_IDX_VSYNC, true);
+ vid_enc->irq_idx[INTR_IDX_VSYNC], true);
if (irq_status) {
SDE_EVT32(DRMID(phys_enc->parent),
vid_enc->hw_intf->idx - INTF_0);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 2187d221a352..340cba536367 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -22,6 +22,11 @@
#define SDE_UBWC_META_BLOCK_SIZE 256
#define SDE_UBWC_PLANE_SIZE_ALIGNMENT 4096
+#define SDE_TILE_HEIGHT_DEFAULT 1
+#define SDE_TILE_HEIGHT_TILED 4
+#define SDE_TILE_HEIGHT_UBWC 4
+#define SDE_TILE_HEIGHT_NV12 8
+
#define SDE_MAX_IMG_WIDTH 0x3FFF
#define SDE_MAX_IMG_HEIGHT 0x3FFF
@@ -48,9 +53,30 @@ bp, flg, fm, np) \
.bpp = bp, \
.fetch_mode = fm, \
.flag = {(flg)}, \
- .num_planes = np \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
}
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = SDE_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
alpha, chroma, count, bp, flg, fm, np) \
{ \
@@ -66,7 +92,8 @@ alpha, chroma, count, bp, flg, fm, np) \
.bpp = bp, \
.fetch_mode = fm, \
.flag = {(flg)}, \
- .num_planes = np \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
}
#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
@@ -83,7 +110,27 @@ alpha, chroma, count, bp, flg, fm, np) \
.bpp = 2, \
.fetch_mode = fm, \
.flag = {(flg)}, \
- .num_planes = np \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
}
#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
@@ -100,9 +147,30 @@ alpha, chroma, count, bp, flg, fm, np) \
.bpp = 2, \
.fetch_mode = fm, \
.flag = {(flg)}, \
- .num_planes = np \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
}
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
flg, fm, np) \
{ \
@@ -118,7 +186,8 @@ flg, fm, np) \
.bpp = bp, \
.fetch_mode = fm, \
.flag = {(flg)}, \
- .num_planes = np \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
}
/*
@@ -414,75 +483,99 @@ static const struct sde_format sde_format_map[] = {
* These tables hold the A5x tile formats supported.
*/
static const struct sde_format sde_format_map_tile[] = {
- INTERLEAVED_RGB_FMT(ARGB8888,
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, 0,
- SDE_FETCH_UBWC, 1),
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
- INTERLEAVED_RGB_FMT(ABGR8888,
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, 0,
- SDE_FETCH_UBWC, 1),
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
- INTERLEAVED_RGB_FMT(RGBA8888,
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
- SDE_FETCH_UBWC, 1),
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
- INTERLEAVED_RGB_FMT(BGRA8888,
+ INTERLEAVED_RGB_FMT_TILED(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, 0,
- SDE_FETCH_UBWC, 1),
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
- INTERLEAVED_RGB_FMT(BGRX8888,
+ INTERLEAVED_RGB_FMT_TILED(BGRX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, 0,
- SDE_FETCH_UBWC, 1),
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
- INTERLEAVED_RGB_FMT(XRGB8888,
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, 0,
- SDE_FETCH_UBWC, 1),
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
- INTERLEAVED_RGB_FMT(RGBX8888,
+ INTERLEAVED_RGB_FMT_TILED(RGBX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, 0,
- SDE_FETCH_UBWC, 1),
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
- PSEUDO_YUV_FMT(NV12,
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
- PSEUDO_YUV_FMT(NV21,
+ PSEUDO_YUV_FMT_TILED(NV21,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
};
static const struct sde_format sde_format_map_p010_tile[] = {
- PSEUDO_YUV_FMT_LOOSE(NV12,
+ PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
};
static const struct sde_format sde_format_map_tp10_tile[] = {
- PSEUDO_YUV_FMT(NV12,
+ PSEUDO_YUV_FMT_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
};
/*
@@ -492,42 +585,42 @@ static const struct sde_format sde_format_map_tp10_tile[] = {
* the data will be passed by user-space.
*/
static const struct sde_format sde_format_map_ubwc[] = {
- INTERLEAVED_RGB_FMT(BGR565,
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, SDE_FORMAT_FLAG_COMPRESSED,
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
- INTERLEAVED_RGB_FMT(ABGR8888,
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_COMPRESSED,
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
- INTERLEAVED_RGB_FMT(XBGR8888,
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, SDE_FORMAT_FLAG_COMPRESSED,
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
- INTERLEAVED_RGB_FMT(ABGR2101010,
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
- INTERLEAVED_RGB_FMT(XBGR2101010,
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
- SDE_FETCH_UBWC, 2),
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
- PSEUDO_YUV_FMT(NV12,
+ PSEUDO_YUV_FMT_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV |
SDE_FORMAT_FLAG_COMPRESSED,
- SDE_FETCH_UBWC, 4),
+ SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
};
static const struct sde_format sde_format_map_p010[] = {
@@ -539,21 +632,21 @@ static const struct sde_format sde_format_map_p010[] = {
};
static const struct sde_format sde_format_map_p010_ubwc[] = {
- PSEUDO_YUV_FMT_LOOSE(NV12,
+ PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
SDE_FORMAT_FLAG_COMPRESSED),
- SDE_FETCH_UBWC, 4),
+ SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
};
static const struct sde_format sde_format_map_tp10_ubwc[] = {
- PSEUDO_YUV_FMT(NV12,
+ PSEUDO_YUV_FMT_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
SDE_FORMAT_FLAG_COMPRESSED),
- SDE_FETCH_UBWC, 4),
+ SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
};
/* _sde_get_v_h_subsample_rate - Get subsample rates for all formats we support
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index ed9a6ea37397..9e0bf09bff0a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1205,7 +1205,7 @@ static int sde_mixer_parse_dt(struct device_node *np,
if (!prop_exists[MIXER_LEN])
mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
- if (lm_pair_mask[i])
+ if ((i < ARRAY_SIZE(lm_pair_mask)) && lm_pair_mask[i])
mixer->lm_pair_mask = 1 << lm_pair_mask[i];
sblk->maxblendstages = max_blendstages;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index da04be4e9719..c056b8198441 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -80,6 +80,7 @@ static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
if (cdm == m->cdm[i].id) {
b->base_off = addr;
b->blk_off = m->cdm[i].base;
+ b->length = m->cdm[i].len;
b->hwversion = m->hwversion;
b->log_mask = SDE_DBG_MASK_CDM;
return &m->cdm[i];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 46e2a13cecc4..341738f624db 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -258,6 +258,35 @@ static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
return 0;
}
+static inline int sde_hw_ctl_get_splash_mixercfg(const u32 *resv_pipes,
+ u32 length)
+{
+ int i = 0;
+ u32 mixercfg = 0;
+
+ for (i = 0; i < length; i++) {
+ /* LK's splash VIG layer always stays on top */
+ switch (resv_pipes[i]) {
+ case SSPP_VIG0:
+ mixercfg |= 0x7 << 0;
+ break;
+ case SSPP_VIG1:
+ mixercfg |= 0x7 << 3;
+ break;
+ case SSPP_VIG2:
+ mixercfg |= 0x7 << 6;
+ break;
+ case SSPP_VIG3:
+ mixercfg |= 0x7 << 26;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return mixercfg;
+}
+
static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 count)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
@@ -312,15 +341,29 @@ static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
return 0;
}
-static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
+static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
int i;
for (i = 0; i < ctx->mixer_count; i++) {
int mixer_id = ctx->mixer_hw_caps[i].id;
+ u32 mixercfg = 0;
+
+ /*
+ * If the bootloader still has early RVC running, the mixer status
+ * can't be directly cleared.
+ */
+ if (handoff) {
+ mixercfg =
+ sde_hw_ctl_get_splash_mixercfg(resv_pipes,
+ resv_pipes_length);
+
+ mixercfg &= SDE_REG_READ(c, CTL_LAYER(mixer_id));
+ }
- SDE_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ SDE_REG_WRITE(c, CTL_LAYER(mixer_id), mixercfg);
SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
@@ -328,7 +371,8 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
}
static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
- enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index)
+ enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
@@ -353,6 +397,20 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
mixercfg_ext = 0;
mixercfg_ext2 = 0;
+ /*
+ * If the bootloader still has RVC running, its mixer status
+ * must be folded into the kernel's mixer setup.
+ */
+ if (handoff) {
+ mixercfg =
+ sde_hw_ctl_get_splash_mixercfg(resv_pipes,
+ resv_pipes_length);
+
+ mixercfg &= SDE_REG_READ(c, CTL_LAYER(lm));
+ mixercfg |= BIT(24);
+ stages--;
+ }
+
for (i = 0; i <= stages; i++) {
/* overflow to ext register if 'i + 1 > 7' */
mix = (i + 1) & 0x7;
@@ -458,6 +516,38 @@ static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
}
+static inline u32 sde_hw_ctl_read_ctl_top_for_splash(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 ctl_top;
+
+ if (!ctx) {
+ pr_err("Invalid ctx\n");
+ return 0;
+ }
+
+ c = &ctx->hw;
+ ctl_top = SDE_REG_READ(c, CTL_TOP);
+ return ctl_top;
+}
+
+static inline u32 sde_hw_ctl_read_ctl_layers_for_splash(struct sde_hw_ctl *ctx,
+ int index)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 ctl_top;
+
+ if (!ctx) {
+ pr_err("Invalid ctx\n");
+ return 0;
+ }
+
+ c = &ctx->hw;
+ ctl_top = SDE_REG_READ(c, CTL_LAYER(index));
+
+ return ctl_top;
+}
+
static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
unsigned long cap)
{
@@ -478,6 +568,8 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
+ ops->read_ctl_top_for_splash = sde_hw_ctl_read_ctl_top_for_splash;
+ ops->read_ctl_layers_for_splash = sde_hw_ctl_read_ctl_layers_for_splash;
};
struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 74dbde92639a..a008ecf4a11d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -146,17 +146,40 @@ struct sde_hw_ctl_ops {
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
+ * @handoff : handoff flag
+ * @resv_pipes : reserved pipes in DT
+ * @resv_pipes_length: array size of resv_pipes
*/
- void (*clear_all_blendstages)(struct sde_hw_ctl *ctx);
+ void (*clear_all_blendstages)(struct sde_hw_ctl *ctx,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length);
/**
* Configure layer mixer to pipe configuration
* @ctx : ctl path ctx pointer
* @lm : layer mixer enumeration
* @cfg : blend stage configuration
+ * @handoff : handoff flag
+ * @resv_pipes : reserved pipes in DT
+ * @resv_pipes_length: array size of resv_pipes
*/
void (*setup_blendstage)(struct sde_hw_ctl *ctx,
- enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index);
+ enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index,
+ bool handoff, const u32 *resv_pipes, u32 resv_pipes_length);
+
+ /**
+ * read CTL_TOP register value for splash case
+ * @ctx : ctl path ctx pointer
+ * @Return : CTL top register value
+ */
+ u32 (*read_ctl_top_for_splash)(struct sde_hw_ctl *ctx);
+
+ /**
+ * read CTL layers register value for splash case
+ * @ctx : ctl path ctx pointer
+ * @index : layer index for this ctl path
+ * @Return : CTL layers register value
+ */
+ u32 (*read_ctl_layers_for_splash)(struct sde_hw_ctl *ctx, int index);
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index eeb7a0002eab..7864b9fef87b 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -19,6 +19,8 @@
#include "sde_irq.h"
#include "sde_core_irq.h"
+static uint32_t g_sde_irq_status;
+
irqreturn_t sde_irq(struct msm_kms *kms)
{
struct sde_kms *sde_kms = to_sde_kms(kms);
@@ -27,6 +29,9 @@ irqreturn_t sde_irq(struct msm_kms *kms)
sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
&interrupts);
+ /* store irq status for irq-storm debugging */
+ g_sde_irq_status = interrupts;
+
/*
* Taking care of MDP interrupt
*/
@@ -40,13 +45,30 @@ irqreturn_t sde_irq(struct msm_kms *kms)
*/
while (interrupts) {
irq_hw_number_t hwirq = fls(interrupts) - 1;
+ unsigned int mapping;
+ int rc;
+
+ mapping = irq_find_mapping(sde_kms->irq_controller.domain,
+ hwirq);
+ if (mapping == 0) {
+ SDE_EVT32(hwirq, SDE_EVTLOG_ERROR);
+ goto error;
+ }
+
+ rc = generic_handle_irq(mapping);
+ if (rc < 0) {
+ SDE_EVT32(hwirq, mapping, rc, SDE_EVTLOG_ERROR);
+ goto error;
+ }
- generic_handle_irq(irq_find_mapping(
- sde_kms->irq_controller.domain, hwirq));
interrupts &= ~(1 << hwirq);
}
return IRQ_HANDLED;
+
+error:
+ /* bad situation: inform the irq core, which may disable the overall MDSS irq */
+ return IRQ_NONE;
}
void sde_irq_preinstall(struct msm_kms *kms)
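A condensed sketch of the guarded dispatch loop introduced in sde_irq() above; the domain and status parameters stand in for sde_kms->irq_controller.domain and the latched interrupt word, and the function name is illustrative:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqreturn.h>

static irqreturn_t example_dispatch(struct irq_domain *domain, u32 status)
{
	while (status) {
		irq_hw_number_t hwirq = fls(status) - 1;
		unsigned int virq = irq_find_mapping(domain, hwirq);

		/* virq 0 means no Linux irq is mapped for this hw irq */
		if (!virq)
			return IRQ_NONE;

		/* a handler failure is also reported back to the irq core */
		if (generic_handle_irq(virq) < 0)
			return IRQ_NONE;

		status &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}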
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 34a32d79f22c..86a5c23b5258 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -343,10 +343,12 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_device *dev = sde_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- if (sde_kms->splash_info.handoff)
- sde_splash_clean_up_exit_lk(kms);
+ if (sde_kms->splash_info.handoff &&
+ sde_kms->splash_info.display_splash_enabled)
+ sde_splash_lk_stop_splash(kms, state);
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, true);
}
static void sde_kms_commit(struct msm_kms *kms,
@@ -385,11 +387,12 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev;
int ret;
- if (!kms || !crtc || !crtc->state) {
- SDE_ERROR("invalid params\n");
+ dev = crtc->dev;
+ if (!dev) {
+ SDE_ERROR("invalid dev\n");
return;
}
@@ -637,6 +640,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
continue;
}
+ rc = sde_splash_setup_display_resource(&sde_kms->splash_info,
+ display, DRM_MODE_CONNECTOR_DSI);
+ if (rc) {
+ SDE_ERROR("dsi %d splash resource setup failed %d\n",
+ i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
rc = dsi_display_drm_bridge_init(display, encoder);
if (rc) {
SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
@@ -729,6 +741,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
continue;
}
+ rc = sde_splash_setup_display_resource(&sde_kms->splash_info,
+ display, DRM_MODE_CONNECTOR_HDMIA);
+ if (rc) {
+ SDE_ERROR("hdmi %d splash resource setup failed %d\n",
+ i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
rc = sde_hdmi_drm_init(display, encoder);
if (rc) {
SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
@@ -810,6 +831,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
struct msm_drm_private *priv;
struct sde_mdss_cfg *catalog;
+ struct sde_splash_info *sinfo;
int primary_planes_idx, i, ret;
int max_crtc_count, max_plane_count;
@@ -822,6 +844,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
dev = sde_kms->dev;
priv = dev->dev_private;
catalog = sde_kms->catalog;
+ sinfo = &sde_kms->splash_info;
ret = sde_core_irq_domain_add(sde_kms);
if (ret)
@@ -849,7 +872,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
primary = false;
plane = sde_plane_init(dev, catalog->vp[i].id,
- primary, 1UL << crtc_id, true);
+ primary, 1UL << crtc_id, true, false);
if (IS_ERR(plane)) {
SDE_ERROR("sde_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -867,14 +890,22 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
for (i = 0; i < max_plane_count; i++) {
bool primary = true;
+ bool resv_plane = false;
if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
|| primary_planes_idx >= max_crtc_count)
primary = false;
+ if (sde_splash_query_plane_is_reserved(sinfo,
+ catalog->sspp[i].id)) {
+ resv_plane = true;
+ DRM_INFO("pipe%d is reserved\n",
+ catalog->sspp[i].id);
+ }
+
plane = sde_plane_init(dev, catalog->sspp[i].id,
primary, (1UL << max_crtc_count) - 1,
- false);
+ false, resv_plane);
if (IS_ERR(plane)) {
SDE_ERROR("sde_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -1335,12 +1366,17 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
sinfo = &sde_kms->splash_info;
if (sinfo->handoff) {
- rc = sde_splash_parse_dt(dev);
+ rc = sde_splash_parse_memory_dt(dev);
if (rc) {
- SDE_ERROR("parse dt for splash info failed: %d\n", rc);
+ SDE_ERROR("parse memory dt failed: %d\n", rc);
goto power_error;
}
+ rc = sde_splash_parse_reserved_plane_dt(sinfo,
+ sde_kms->catalog);
+ if (rc)
+ SDE_ERROR("parse reserved plane dt failed: %d\n", rc);
+
sde_splash_init(&priv->phandle, kms);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index f5f125c3f71c..ceac5a931e7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -303,6 +303,11 @@ static void _sde_plane_set_qos_lut(struct sde_phy_plane *pp,
fb->pixel_format,
fb->modifier,
drm_format_num_planes(fb->pixel_format));
+ if (!fmt) {
+ SDE_ERROR("%s: failed to get fmt\n", __func__);
+ return;
+ }
+
total_fl = _sde_plane_calc_fill_level(pp, fmt,
pp->pipe_cfg.src_rect.w);
@@ -362,6 +367,10 @@ static void _sde_plane_set_danger_lut(struct sde_phy_plane *pp,
fb->pixel_format,
fb->modifier,
drm_format_num_planes(fb->pixel_format));
+ if (!fmt) {
+ SDE_ERROR("%s: failed to get fmt\n", __func__);
+ return;
+ }
if (SDE_FORMAT_IS_LINEAR(fmt)) {
danger_lut = pp->pipe_sblk->danger_lut_linear;
@@ -694,11 +703,11 @@ static inline void _sde_plane_set_scanout(struct sde_phy_plane *pp,
static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp,
struct sde_plane_state *pstate)
{
- struct sde_plane *psde = pp->sde_plane;
+ struct sde_plane *psde;
struct sde_hw_scaler3_cfg *cfg;
int ret = 0;
- if (!pp || !pp->scaler3_cfg) {
+ if (!pp || !pp->sde_plane || !pp->scaler3_cfg) {
SDE_ERROR("invalid args\n");
return -EINVAL;
} else if (!pstate) {
@@ -707,6 +716,7 @@ static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp,
return -EINVAL;
}
+ psde = pp->sde_plane;
cfg = pp->scaler3_cfg;
cfg->dir_lut = msm_property_get_blob(
@@ -1349,51 +1359,55 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
src.y = DIV_ROUND_UP(src.y, 2);
src.y &= ~0x1;
}
- }
- list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
- num_of_phy_planes++;
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+ num_of_phy_planes++;
- /*
- * Only need to use one physical plane if plane width is still within
- * the limitation.
- */
- is_across_mixer_boundary = (plane->state->crtc_x < crtc_split_width) &&
+ /*
+ * Only need to use one physical plane if plane width
+ * is still within the limitation.
+ */
+ is_across_mixer_boundary =
+ (plane->state->crtc_x < crtc_split_width) &&
(plane->state->crtc_x + plane->state->crtc_w >
- crtc_split_width);
- if (crtc_split_width >= (src.x + src.w) && !is_across_mixer_boundary)
- num_of_phy_planes = 1;
-
- if (num_of_phy_planes > 1) {
- /* Adjust width for multi-pipe */
- src.w /= num_of_phy_planes;
- dst.w /= num_of_phy_planes;
- }
+ crtc_split_width);
+ if (crtc_split_width >= (src.x + src.w) &&
+ !is_across_mixer_boundary)
+ num_of_phy_planes = 1;
- list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
- /* Adjust offset for multi-pipe */
if (num_of_phy_planes > 1) {
- src.x += src.w * pp->index;
- dst.x += dst.w * pp->index;
+ /* Adjust width for multi-pipe */
+ src.w /= num_of_phy_planes;
+ dst.w /= num_of_phy_planes;
}
- pp->pipe_cfg.src_rect = src;
- pp->pipe_cfg.dst_rect = dst;
-
- /* check for color fill */
- pp->color_fill = (uint32_t)sde_plane_get_property(pstate,
- PLANE_PROP_COLOR_FILL);
- if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
- /* skip remaining processing on color fill */
- pstate->dirty = 0x0;
- } else if (pp->pipe_hw->ops.setup_rects) {
- _sde_plane_setup_scaler(pp, fmt, pstate);
- pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
- &pp->pipe_cfg, &pp->pixel_ext,
- pp->scaler3_cfg);
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ /* Adjust offset for multi-pipe */
+ if (num_of_phy_planes > 1) {
+ src.x += src.w * pp->index;
+ dst.x += dst.w * pp->index;
+ }
+ pp->pipe_cfg.src_rect = src;
+ pp->pipe_cfg.dst_rect = dst;
+
+ /* check for color fill */
+ pp->color_fill = (uint32_t)sde_plane_get_property(
+ pstate, PLANE_PROP_COLOR_FILL);
+ if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ pstate->dirty = 0x0;
+ } else if (pp->pipe_hw->ops.setup_rects) {
+ _sde_plane_setup_scaler(pp, fmt, pstate);
+
+ pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
+ &pp->pipe_cfg, &pp->pixel_ext,
+ pp->scaler3_cfg);
+ }
}
+ }
- if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) ||
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) ||
(src_flags &
SDE_SSPP_SECURE_OVERLAY_SESSION)) &&
pp->pipe_hw->ops.setup_format) {
@@ -1450,7 +1464,7 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
static int sde_plane_prepare_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state)
{
- struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *fb;
struct sde_plane *psde = to_sde_plane(plane);
struct sde_plane_state *pstate;
int rc;
@@ -1461,6 +1475,7 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
if (!new_state->fb)
return 0;
+ fb = new_state->fb;
pstate = to_sde_plane_state(new_state);
rc = _sde_plane_get_aspace(psde, pstate, &psde->aspace);
@@ -1783,7 +1798,7 @@ static void sde_plane_atomic_update(struct drm_plane *plane,
/* helper to install properties which are common to planes and crtcs */
static void _sde_plane_install_properties(struct drm_plane *plane,
- struct sde_mdss_cfg *catalog)
+ struct sde_mdss_cfg *catalog, bool plane_reserved)
{
static const struct drm_prop_enum_list e_blend_op[] = {
{SDE_DRM_BLEND_OP_NOT_DEFINED, "not_defined"},
@@ -1800,7 +1815,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
{SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
{SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
};
- const struct sde_format_extended *format_list;
+ const struct sde_format_extended *format_list = NULL;
struct sde_kms_info *info;
struct sde_plane *psde = to_sde_plane(plane);
int zpos_max = 255;
@@ -1979,6 +1994,16 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
sde_kms_info_add_keyint(info, "max_downscale", maxdwnscale);
sde_kms_info_add_keyint(info, "max_horizontal_deci", maxhdeciexp);
sde_kms_info_add_keyint(info, "max_vertical_deci", maxvdeciexp);
+
+ /* When early RVC is enabled in the bootloader and has not exited,
+ * user space must not touch the pipe that RVC is running on.
+ * Publish "plane_unavailability" in that pipe's property blob so
+ * user space can parse it and stop allocating from that pipe.
+ * plane_reserved is 1 when the pipe is occupied by the bootloader,
+ * and 0 when it is not.
+ */
+ sde_kms_info_add_keyint(info, "plane_unavailability", plane_reserved);
msm_property_set_blob(&psde->property_info, &psde->blob_info,
info->data, info->len, PLANE_PROP_INFO);
@@ -2716,7 +2741,8 @@ end:
/* initialize plane */
struct drm_plane *sde_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
- unsigned long possible_crtcs, bool vp_enabled)
+ unsigned long possible_crtcs,
+ bool vp_enabled, bool plane_reserved)
{
struct drm_plane *plane = NULL;
struct sde_plane *psde;
@@ -2841,7 +2867,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
sizeof(struct sde_plane_state));
- _sde_plane_install_properties(plane, kms->catalog);
+ _sde_plane_install_properties(plane, kms->catalog, plane_reserved);
/* save user friendly pipe name for later */
snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 7b91822d4cde..8ac582643926 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -77,10 +77,12 @@ void sde_plane_flush(struct drm_plane *plane);
* @primary_plane: true if this pipe is primary plane for crtc
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @vp_enabled: Flag indicating if virtual planes enabled
+ * @plane_reserved: Flag indicating the plane is occupied in bootloader
*/
struct drm_plane *sde_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
- unsigned long possible_crtcs, bool vp_enabled);
+ unsigned long possible_crtcs,
+ bool vp_enabled, bool plane_reserved);
/**
* sde_plane_wait_input_fence - wait for input fence object
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index de0551b22d2e..6055dc861c72 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#include "sde_encoder.h"
#include "sde_connector.h"
#include "sde_hw_sspp.h"
+#include "sde_splash.h"
#define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
@@ -417,6 +418,8 @@ int sde_rm_init(struct sde_rm *rm,
mutex_init(&rm->rm_lock);
+ rm->dev = dev;
+
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < SDE_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -652,7 +655,8 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
static int _sde_rm_reserve_lms(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
- struct sde_rm_requirements *reqs)
+ struct sde_rm_requirements *reqs,
+ uint32_t prefer_lm_id)
{
struct sde_rm_hw_blk *lm[MAX_BLOCKS];
@@ -678,6 +682,10 @@ static int _sde_rm_reserve_lms(
lm_count = 0;
lm[lm_count] = iter_i.blk;
+ /* skip mixers that do not match the preferred lm id */
+ if ((prefer_lm_id > 0) && (iter_i.blk->id != prefer_lm_id))
+ continue;
+
if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp, reqs,
lm[lm_count], &dspp[lm_count], &pp[lm_count],
NULL))
@@ -699,6 +707,7 @@ static int _sde_rm_reserve_lms(
continue;
lm[lm_count] = iter_j.blk;
+
++lm_count;
}
}
@@ -747,7 +756,8 @@ static int _sde_rm_reserve_lms(
static int _sde_rm_reserve_ctls(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
- struct sde_rm_requirements *reqs)
+ struct sde_rm_requirements *reqs,
+ uint32_t prefer_ctl_id)
{
struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
struct sde_rm_hw_iter iter;
@@ -769,6 +779,14 @@ static int _sde_rm_reserve_ctls(
SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
+ /* take the preferred ctl id first when one is specified */
+ if ((prefer_ctl_id > 0) && (iter.blk->id == prefer_ctl_id)) {
+ ctls[i] = iter.blk;
+
+ if (++i == reqs->num_ctl)
+ break;
+ }
+
if (reqs->needs_split_display != has_split_display)
continue;
@@ -928,10 +946,10 @@ static int _sde_rm_make_next_rsvp(
* - Check mixers without DSPPs
* - Only then allow to grab from mixers with DSPP capability
*/
- ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, 0);
if (ret && !RM_RQ_DSPP(reqs)) {
reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
- ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, 0);
}
if (ret) {
@@ -944,10 +962,10 @@ static int _sde_rm_make_next_rsvp(
* - Check mixers without Split Display
* - Only then allow to grab from CTLs with split display capability
*/
- _sde_rm_reserve_ctls(rm, rsvp, reqs);
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, 0);
if (ret && !reqs->needs_split_display) {
reqs->needs_split_display = true;
- _sde_rm_reserve_ctls(rm, rsvp, reqs);
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, 0);
}
if (ret) {
SDE_ERROR("unable to find appropriate CTL\n");
@@ -962,6 +980,109 @@ static int _sde_rm_make_next_rsvp(
return ret;
}
+static int _sde_rm_make_next_rsvp_for_splash(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+{
+ int ret;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ int i;
+ int intf_id = INTF_0;
+ u32 prefer_lm_id = 0;
+ u32 prefer_ctl_id = 0;
+
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ sinfo = &sde_kms->splash_info;
+
+ /* Get the intf id first, then reserve the same lm and ctl
+ * that the bootloader used for the kernel resource manager.
+ */
+ for (i = 0; i < ARRAY_SIZE(reqs->hw_res.intfs); i++) {
+ if (reqs->hw_res.intfs[i] == INTF_MODE_NONE)
+ continue;
+ intf_id = i + INTF_0;
+ break;
+ }
+
+ /* get preferred lm id and ctl id */
+ for (i = 0; i < CTL_MAX - 1; i++) {
+ if (sinfo->res.top[i].intf_sel != intf_id)
+ continue;
+
+ prefer_lm_id = sinfo->res.top[i].lm[0].lm_id;
+ prefer_ctl_id = sinfo->res.top[i].lm[0].ctl_id;
+ break;
+ }
+
+ SDE_DEBUG("intf_id %d, prefer lm_id %d, ctl_id %d\n",
+ intf_id, prefer_lm_id, prefer_ctl_id);
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ /*
+ * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+ * Do assignment preferring to give away low-resource mixers first:
+ * - Check mixers without DSPPs
+ * - Only then allow to grab from mixers with DSPP capability
+ */
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, prefer_lm_id);
+ if (ret && !RM_RQ_DSPP(reqs)) {
+ reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, prefer_lm_id);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ for (i = 0; i < sinfo->res.ctl_top_cnt; i++)
+ SDE_DEBUG("splash_info ctl_ids[%d] = %d\n",
+ i, sinfo->res.ctl_ids[i]);
+
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, prefer_ctl_id);
+ if (ret && !reqs->needs_split_display) {
+ reqs->needs_split_display = true;
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, prefer_ctl_id);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+ ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+
+ return ret;
+}
+
static int _sde_rm_populate_requirements(
struct sde_rm *rm,
struct drm_encoder *enc,
@@ -1253,6 +1374,8 @@ int sde_rm_reserve(
{
struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct sde_rm_requirements reqs;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
int ret;
if (!rm || !enc || !crtc_state || !conn_state) {
@@ -1260,6 +1383,19 @@ int sde_rm_reserve(
return -EINVAL;
}
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+
/* Check if this is just a page-flip */
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
@@ -1318,8 +1454,13 @@ int sde_rm_reserve(
}
/* Check the proposed reservation, store it in hw's "next" field */
- ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
- rsvp_nxt, &reqs);
+ if (sde_kms->splash_info.handoff) {
+ SDE_DEBUG("Reserve resource for splash\n");
+ ret = _sde_rm_make_next_rsvp_for_splash
+ (rm, enc, crtc_state, conn_state, rsvp_nxt, &reqs);
+ } else
+ ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
@@ -1352,3 +1493,92 @@ end:
return ret;
}
+
+static int _sde_rm_get_ctl_lm_for_splash(struct sde_hw_ctl *ctl,
+ int max_lm_cnt, u8 lm_cnt, u8 *lm_ids,
+ struct splash_ctl_top *top, int index)
+{
+ int j;
+ struct splash_lm_hw *lm;
+
+ if (!ctl || !top) {
+ SDE_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ lm = top->lm;
+ for (j = 0; j < max_lm_cnt; j++) {
+ lm[top->ctl_lm_cnt].lm_reg_value =
+ ctl->ops.read_ctl_layers_for_splash(ctl, j + LM_0);
+
+ if (lm[top->ctl_lm_cnt].lm_reg_value) {
+ lm[top->ctl_lm_cnt].ctl_id = index + CTL_0;
+ lm_ids[lm_cnt++] = j + LM_0;
+ lm[top->ctl_lm_cnt].lm_id = j + LM_0;
+ top->ctl_lm_cnt++;
+ }
+ }
+
+ return top->ctl_lm_cnt;
+}
+
+static void _sde_rm_get_ctl_top_for_splash(struct sde_hw_ctl *ctl,
+ struct splash_ctl_top *top)
+{
+ if (!ctl || !top) {
+ SDE_ERROR("invalid ctl or top\n");
+ return;
+ }
+
+ if (!ctl->ops.read_ctl_top_for_splash) {
+ SDE_ERROR("read_ctl_top not initialized\n");
+ return;
+ }
+
+ top->value = ctl->ops.read_ctl_top_for_splash(ctl);
+ top->intf_sel = (top->value >> 4) & 0xf;
+}
+
+int sde_rm_read_resource_for_splash(struct sde_rm *rm,
+ void *splash_info,
+ struct sde_mdss_cfg *cat)
+{
+ struct sde_rm_hw_iter ctl_iter;
+ int index = 0;
+ struct sde_splash_info *sinfo;
+ struct sde_hw_ctl *ctl;
+
+ if (!rm || !splash_info || !cat)
+ return -EINVAL;
+
+ sinfo = (struct sde_splash_info *)splash_info;
+
+ sde_rm_init_hw_iter(&ctl_iter, 0, SDE_HW_BLK_CTL);
+
+ while (_sde_rm_get_hw_locked(rm, &ctl_iter)) {
+ ctl = (struct sde_hw_ctl *)ctl_iter.hw;
+
+ _sde_rm_get_ctl_top_for_splash(ctl,
+ &sinfo->res.top[index]);
+
+ if (sinfo->res.top[index].intf_sel) {
+ sinfo->res.lm_cnt +=
+ _sde_rm_get_ctl_lm_for_splash(ctl,
+ cat->mixer_count,
+ sinfo->res.lm_cnt,
+ sinfo->res.lm_ids,
+ &sinfo->res.top[index], index);
+
+ sinfo->res.ctl_ids[sinfo->res.ctl_top_cnt] =
+ index + CTL_0;
+
+ sinfo->res.ctl_top_cnt++;
+ }
+ index++;
+ }
+
+ SDE_DEBUG("%s: ctl_top_cnt=%d, lm_cnt=%d\n", __func__,
+ sinfo->res.ctl_top_cnt, sinfo->res.lm_cnt);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 87e95bfebe98..bec398a3b996 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -212,4 +212,14 @@ int sde_rm_check_property_topctl(uint64_t val);
*/
int sde_rm_check_property_topctl(uint64_t val);
+/**
+ * sde_rm_read_resource_for_splash - read splash resource used in bootloader
+ * @rm: SDE Resource Manager handle
+ * @sinfo: handle for splash info
+ * @cat: Pointer to hardware catalog
+ */
+int sde_rm_read_resource_for_splash(struct sde_rm *rm,
+ void *sinfo,
+ struct sde_mdss_cfg *cat);
+
#endif /* __SDE_RM_H__ */
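The LM/CTL changes in sde_rm.c above all follow the same "preferred id" selection pattern: when the bootloader handoff says which block is already driving the panel, only that block is accepted, while an id of 0 keeps the original first-fit behaviour. A minimal sketch of that pattern with simplified types and illustrative names (not the driver's actual structures):

#include <linux/types.h>

struct example_blk {
	u32 id;
	bool in_use;
};

static struct example_blk *example_pick_blk(struct example_blk *blks,
		int count, u32 prefer_id)
{
	int i;

	for (i = 0; i < count; i++) {
		if (blks[i].in_use)
			continue;

		/* when a preferred id is given, skip every other block */
		if (prefer_id && blks[i].id != prefer_id)
			continue;

		return &blks[i];
	}

	return NULL;
}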
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
index 19e6406600cd..9c3964e99c1f 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.c
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,9 @@
#include "sde_hw_util.h"
#include "sde_hw_intf.h"
#include "sde_hw_catalog.h"
+#include "sde_rm.h"
#include "dsi_display.h"
+#include "sde_hdmi.h"
#define MDP_SSPP_TOP0_OFF 0x1000
#define DISP_INTF_SEL 0x004
@@ -34,10 +36,12 @@
#define SCRATCH_REGISTER_2 0x01C
#define SDE_LK_RUNNING_VALUE 0xC001CAFE
-#define SDE_LK_SHUT_DOWN_VALUE 0xDEADDEAD
+#define SDE_LK_STOP_SPLASH_VALUE 0xDEADDEAD
#define SDE_LK_EXIT_VALUE 0xDEADBEEF
-#define SDE_LK_EXIT_MAX_LOOP 20
+#define INTF_HDMI_SEL (BIT(25) | BIT(24))
+#define INTF_DSI0_SEL BIT(8)
+#define INTF_DSI1_SEL BIT(16)
static DEFINE_MUTEX(sde_splash_lock);
@@ -184,26 +188,14 @@ static bool _sde_splash_lk_check(struct sde_hw_intr *intr)
}
/**
- * _sde_splash_notify_lk_to_exit.
+ * _sde_splash_notify_lk_stop_splash.
*
- * Function to monitor LK's status and tell it to exit.
+ * Function to stop early splash in LK.
*/
-static void _sde_splash_notify_lk_exit(struct sde_hw_intr *intr)
+static inline void _sde_splash_notify_lk_stop_splash(struct sde_hw_intr *intr)
{
- int i = 0;
-
- /* first is to write exit signal to scratch register*/
- SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE);
-
- while ((SDE_LK_EXIT_VALUE !=
- SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) &&
- (++i < SDE_LK_EXIT_MAX_LOOP)) {
- DRM_INFO("wait for LK's exit");
- msleep(20);
- }
-
- if (i == SDE_LK_EXIT_MAX_LOOP)
- SDE_ERROR("Loop LK's exit failed\n");
+ /* write splash stop signal to scratch register */
+ SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_STOP_SPLASH_VALUE);
}
static int _sde_splash_gem_new(struct drm_device *dev,
@@ -283,6 +275,44 @@ static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
sinfo->splash_mem_size = NULL;
}
+static void _sde_splash_sent_pipe_update_uevent(struct sde_kms *sde_kms)
+{
+ char *event_string;
+ char *envp[2];
+ struct drm_device *dev;
+ struct device *kdev;
+ int i = 0;
+
+ if (!sde_kms || !sde_kms->dev) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ dev = sde_kms->dev;
+ kdev = dev->primary->kdev;
+
+ event_string = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!event_string) {
+ SDE_ERROR("failed to allocate event string\n");
+ return;
+ }
+
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ if (sde_kms->splash_info.reserved_pipe_info[i] != 0xFFFFFFFF)
+ snprintf(event_string, SZ_4K, "pipe%d available",
+ sde_kms->splash_info.reserved_pipe_info[i]);
+ }
+
+ DRM_INFO("generating pipe update event[%s]", event_string);
+
+ envp[0] = event_string;
+ envp[1] = NULL;
+
+ kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(event_string);
+}
+
static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo,
u32 *hdmi_cnt, u32 *dsi_cnt)
{
@@ -292,30 +322,60 @@ static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo,
mutex_unlock(&sde_splash_lock);
}
-static int _sde_splash_free_resource(struct msm_mmu *mmu,
- struct sde_splash_info *sinfo, enum splash_connector_type conn)
+static int _sde_splash_free_module_resource(struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo)
{
- struct msm_gem_object *msm_obj = to_msm_bo(sinfo->obj[conn]);
+ int i = 0;
+ struct msm_gem_object *msm_obj;
- if (!msm_obj)
- return -EINVAL;
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
+
+ if (!msm_obj)
+ return -EINVAL;
- if (mmu->funcs && mmu->funcs->unmap)
- mmu->funcs->unmap(mmu, sinfo->splash_mem_paddr[conn],
- msm_obj->sgt, NULL);
+ if (mmu->funcs && mmu->funcs->unmap)
+ mmu->funcs->early_splash_unmap(mmu,
+ sinfo->splash_mem_paddr[i], msm_obj->sgt);
- _sde_splash_free_bootup_memory_to_system(sinfo->splash_mem_paddr[conn],
- sinfo->splash_mem_size[conn]);
+ _sde_splash_free_bootup_memory_to_system(
+ sinfo->splash_mem_paddr[i],
+ sinfo->splash_mem_size[i]);
- _sde_splash_destroy_gem_object(msm_obj);
+ _sde_splash_destroy_gem_object(msm_obj);
+ }
return 0;
}
+static bool _sde_splash_validate_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state)
+{
+ int i, nplanes;
+ struct drm_plane *plane;
+ struct drm_device *dev = sde_kms->dev;
+
+ nplanes = dev->mode_config.num_total_plane;
+
+ for (i = 0; i < nplanes; i++) {
+ plane = state->planes[i];
+
+ /*
+ * As plane state has been swapped, we need to check
+ * fb in state->planes, not fb in state->plane_state.
+ */
+ if (plane && plane->fb)
+ return true;
+ }
+
+ return false;
+}
+
__ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
{
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
+ int ret = 0;
int i = 0;
if (!phandle || !kms) {
@@ -329,12 +389,13 @@ __ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
sinfo->dsi_connector_cnt = 0;
sinfo->hdmi_connector_cnt = 0;
+ /* Vote data bus after splash is enabled in bootloader */
sde_power_data_bus_bandwidth_ctrl(phandle,
sde_kms->core_client, true);
for (i = 0; i < sinfo->splash_mem_num; i++) {
if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) {
- SDE_ERROR("failed to reserve memory\n");
+ SDE_ERROR("LK's splash memory is not reserved\n");
/* withdraw the vote when failed. */
sde_power_data_bus_bandwidth_ctrl(phandle,
@@ -344,7 +405,10 @@ __ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
}
}
- return 0;
+ ret = sde_rm_read_resource_for_splash(&sde_kms->rm,
+ (void *)sinfo, sde_kms->catalog);
+
+ return ret;
}
void sde_splash_destroy(struct sde_splash_info *sinfo,
@@ -372,12 +436,12 @@ void sde_splash_destroy(struct sde_splash_info *sinfo,
}
/*
- * sde_splash_parse_dt.
+ * sde_splash_parse_memory_dt.
* In the function, it will parse and reserve two kinds of memory node.
* First is to get the reserved memory for display buffers.
- * Second is to get the memory node LK's code stack is running on.
+ * Second is to get the memory node that LK's heap memory resides in.
*/
-int sde_splash_parse_dt(struct drm_device *dev)
+int sde_splash_parse_memory_dt(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct sde_kms *sde_kms;
@@ -404,6 +468,79 @@ int sde_splash_parse_dt(struct drm_device *dev)
return 0;
}
+static inline u32 _sde_splash_parse_sspp_id(struct sde_mdss_cfg *cfg,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < cfg->sspp_count; i++) {
+ if (!strcmp(cfg->sspp[i].name, name))
+ return cfg->sspp[i].id;
+ }
+
+ return 0;
+}
+
+int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info,
+ struct sde_mdss_cfg *cfg)
+{
+ struct device_node *parent, *node;
+ struct property *prop;
+ const char *cname;
+ int ret = 0, i = 0;
+
+ if (!splash_info || !cfg)
+ return -EINVAL;
+
+ parent = of_find_node_by_path("/qcom,sde-reserved-plane");
+ if (!parent)
+ return -EINVAL;
+
+ for (i = 0; i < MAX_BLOCKS; i++)
+ splash_info->reserved_pipe_info[i] = 0xFFFFFFFF;
+
+ i = 0;
+ for_each_child_of_node(parent, node) {
+ if (i >= MAX_BLOCKS) {
+ SDE_ERROR("number of nodes (%d) exceeds max (%d)\n",
+ i, MAX_BLOCKS);
+ ret = -EINVAL;
+ goto parent_node_err;
+ }
+
+ of_property_for_each_string(node, "qcom,plane-name",
+ prop, cname)
+ splash_info->reserved_pipe_info[i] =
+ _sde_splash_parse_sspp_id(cfg, cname);
+ i++;
+ }
+
+parent_node_err:
+ of_node_put(parent);
+
+ return ret;
+}
+
+bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo,
+ uint32_t pipe)
+{
+ int i = 0;
+
+ if (!sinfo)
+ return false;
+
+ /* early return if no splash is enabled */
+ if (!sinfo->handoff)
+ return false;
+
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ if (sinfo->reserved_pipe_info[i] == pipe)
+ return true;
+ }
+
+ return false;
+}
+
int sde_splash_get_handoff_status(struct msm_kms *kms)
{
uint32_t intf_sel = 0;
@@ -448,17 +585,20 @@ int sde_splash_get_handoff_status(struct msm_kms *kms)
* considered as single display. So decrement
* 'num_of_display_on' by 1
*/
- if (split_display)
+ if (split_display) {
num_of_display_on--;
+ sinfo->split_is_enabled = true;
+ }
}
if (num_of_display_on) {
sinfo->handoff = true;
- sinfo->program_scratch_regs = true;
+ sinfo->display_splash_enabled = true;
sinfo->lk_is_exited = false;
+ sinfo->intf_sel_status = intf_sel;
} else {
sinfo->handoff = false;
- sinfo->program_scratch_regs = false;
+ sinfo->display_splash_enabled = false;
sinfo->lk_is_exited = true;
}
@@ -489,8 +629,9 @@ int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
msm_obj = to_msm_bo(sinfo->obj[i]);
if (mmu->funcs && mmu->funcs->map) {
- ret = mmu->funcs->map(mmu, sinfo->splash_mem_paddr[i],
- msm_obj->sgt, IOMMU_READ | IOMMU_NOEXEC, NULL);
+ ret = mmu->funcs->early_splash_map(mmu,
+ sinfo->splash_mem_paddr[i], msm_obj->sgt,
+ IOMMU_READ | IOMMU_NOEXEC);
if (!ret) {
SDE_ERROR("Map blk %d @%pK failed.\n",
@@ -503,6 +644,71 @@ int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
return ret ? 0 : -ENOMEM;
}
+static bool _sde_splash_get_panel_intf_status(struct sde_splash_info *sinfo,
+ const char *display_name, int connector_type)
+{
+ bool ret = false;
+ int intf_status = 0;
+
+ if (sinfo && sinfo->handoff) {
+ if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+ if (!strcmp(display_name, "dsi_adv_7533_1")) {
+ if (sinfo->intf_sel_status & INTF_DSI0_SEL)
+ ret = true;
+ } else if (!strcmp(display_name, "dsi_adv_7533_2")) {
+ if (sinfo->intf_sel_status & INTF_DSI1_SEL)
+ ret = true;
+ } else
+ DRM_INFO("wrong display name %s\n",
+ display_name);
+ } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ intf_status = sinfo->intf_sel_status & INTF_HDMI_SEL;
+ ret = (intf_status == INTF_HDMI_SEL);
+ }
+ }
+
+ return ret;
+}
+
+int sde_splash_setup_display_resource(struct sde_splash_info *sinfo,
+ void *disp, int connector_type)
+{
+ if (!sinfo || !disp)
+ return -EINVAL;
+
+ /* early return if splash is not enabled in bootloader */
+ if (!sinfo->handoff)
+ return 0;
+
+ if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+ struct dsi_display *display = (struct dsi_display *)disp;
+
+ display->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ display->name,
+ connector_type);
+
+ DRM_INFO("DSI splash %s\n",
+ display->cont_splash_enabled ? "enabled" : "disabled");
+
+ if (display->cont_splash_enabled) {
+ if (dsi_dsiplay_setup_splash_resource(display))
+ return -EINVAL;
+ }
+ } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)disp;
+
+ sde_hdmi->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ NULL, connector_type);
+
+ DRM_INFO("HDMI splash %s\n",
+ sde_hdmi->cont_splash_enabled ? "enabled" : "disabled");
+ }
+
+ return 0;
+}
+
void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
int connector_type)
{
@@ -518,20 +724,31 @@ void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
}
}
-bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo)
+bool sde_splash_get_lk_complete_status(struct msm_kms *kms)
{
- bool ret = 0;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ struct sde_hw_intr *intr;
- mutex_lock(&sde_splash_lock);
- ret = !sinfo->handoff && !sinfo->lk_is_exited;
- mutex_unlock(&sde_splash_lock);
+ if (!sde_kms || !sde_kms->hw_intr) {
+ SDE_ERROR("invalid kms\n");
+ return false;
+ }
- return ret;
+ intr = sde_kms->hw_intr;
+
+ if (sde_kms->splash_info.handoff &&
+ SDE_LK_EXIT_VALUE == SDE_REG_READ(&intr->hw,
+ SCRATCH_REGISTER_1)) {
+ SDE_DEBUG("LK has completely exited\n");
+ return true;
+ }
+
+ return false;
}
-int sde_splash_clean_up_free_resource(struct msm_kms *kms,
- struct sde_power_handle *phandle,
- int connector_type, void *display)
+int sde_splash_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle,
+ int connector_type, void *display)
{
struct sde_kms *sde_kms;
struct sde_splash_info *sinfo;
@@ -554,65 +771,76 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms,
return -EINVAL;
}
+ /* Get the number of connectors that early splash is on. */
_sde_splash_get_connector_ref_cnt(sinfo, &hdmi_conn_count,
&dsi_conn_count);
mutex_lock(&sde_splash_lock);
- if (hdmi_conn_count == 0 && dsi_conn_count == 0 &&
- !sinfo->lk_is_exited) {
- /* When both hdmi's and dsi's handoff are finished,
- * 1. Destroy splash node objects.
- * 2. Release the memory which LK's stack is running on.
- * 3. Withdraw AHB data bus bandwidth voting.
- */
- DRM_INFO("HDMI and DSI resource handoff is completed\n");
+ if (!sinfo->handoff) {
+ mutex_unlock(&sde_splash_lock);
+ return 0;
+ }
- sinfo->lk_is_exited = true;
+ /*
+ * Free all of LK's resources only after a user commit has happened
+ * on every display that early splash is enabled on.
+ */
+ if (hdmi_conn_count == 0 && dsi_conn_count == 0) {
+ mmu = sde_kms->aspace[0]->mmu;
+ if (!mmu) {
+ mutex_unlock(&sde_splash_lock);
+ return -EINVAL;
+ }
+
+ /* free HDMI's, DSI's and early camera's reserved memory */
+ _sde_splash_free_module_resource(mmu, sinfo);
_sde_splash_destroy_splash_node(sinfo);
+ /* free lk_pool heap memory */
_sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr,
- sinfo->lk_pool_size);
+ sinfo->lk_pool_size);
+ /* withdraw data bus vote */
sde_power_data_bus_bandwidth_ctrl(phandle,
- sde_kms->core_client, false);
+ sde_kms->core_client, false);
+ /*
+ * Turn off MDP core power so the power on/off operations stay
+ * balanced, as MDP core power was already enabled when early
+ * splash was enabled.
+ */
+ sde_power_resource_enable(phandle,
+ sde_kms->core_client, false);
+
+ /* send uevent to notify user space to recycle the reserved pipes */
+ _sde_splash_sent_pipe_update_uevent(sde_kms);
+
+ /* Finally, clear the handoff flag to indicate that
+ * handoff is complete.
+ */
+ sinfo->handoff = false;
+
+ DRM_INFO("HDMI and DSI resource handoff is completed\n");
mutex_unlock(&sde_splash_lock);
return 0;
}
- mmu = sde_kms->aspace[0]->mmu;
-
+ /*
+ * Ensure a user commit has happened on each of the
+ * connectors that have splash enabled.
+ */
switch (connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
- if (sinfo->hdmi_connector_cnt == 1) {
+ if (sinfo->hdmi_connector_cnt == 1)
sinfo->hdmi_connector_cnt--;
-
- ret = _sde_splash_free_resource(mmu,
- sinfo, SPLASH_HDMI);
- }
break;
case DRM_MODE_CONNECTOR_DSI:
- /*
- * Basically, we have commits coming on two DSI connectors.
- * So when releasing DSI resource, it's ensured that the
- * coming commits should happen on different DSIs, to promise
- * the handoff has finished on the two DSIs, then it's safe
- * to release DSI resource, otherwise, problem happens when
- * freeing memory, while DSI0 or DSI1 is still visiting
- * the memory.
- */
if (strcmp(dsi_display->display_type, "unknown") &&
strcmp(last_commit_display_type,
- dsi_display->display_type)) {
- if (sinfo->dsi_connector_cnt > 1)
- sinfo->dsi_connector_cnt--;
- else if (sinfo->dsi_connector_cnt == 1) {
- ret = _sde_splash_free_resource(mmu,
- sinfo, SPLASH_DSI);
-
+ dsi_display->display_type)) {
+ if (sinfo->dsi_connector_cnt >= 1)
sinfo->dsi_connector_cnt--;
- }
last_commit_display_type = dsi_display->display_type;
}
@@ -620,20 +848,20 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms,
default:
ret = -EINVAL;
SDE_ERROR("%s: invalid connector_type %d\n",
- __func__, connector_type);
+ __func__, connector_type);
}
mutex_unlock(&sde_splash_lock);
-
return ret;
}
/*
* In below function, it will
- * 1. Notify LK to exit and wait for exiting is done.
+ * 1. Notify LK to stop display splash.
* 2. Set DOMAIN_ATTR_EARLY_MAP to 1 to enable stage 1 translation in iommu.
*/
-int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
+int sde_splash_lk_stop_splash(struct msm_kms *kms,
+ struct drm_atomic_state *state)
{
struct sde_splash_info *sinfo;
struct msm_mmu *mmu;
@@ -649,12 +877,12 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
/* Monitor LK's status and tell it to exit. */
mutex_lock(&sde_splash_lock);
- if (sinfo->program_scratch_regs) {
+ if (_sde_splash_validate_commit(sde_kms, state) &&
+ sinfo->display_splash_enabled) {
if (_sde_splash_lk_check(sde_kms->hw_intr))
- _sde_splash_notify_lk_exit(sde_kms->hw_intr);
+ _sde_splash_notify_lk_stop_splash(sde_kms->hw_intr);
- sinfo->handoff = false;
- sinfo->program_scratch_regs = false;
+ sinfo->display_splash_enabled = false;
}
mutex_unlock(&sde_splash_lock);
@@ -671,7 +899,8 @@ int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
*/
if (mmu->funcs && mmu->funcs->set_property) {
ret = mmu->funcs->set_property(mmu,
- DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff);
+ DOMAIN_ATTR_EARLY_MAP,
+ &sinfo->display_splash_enabled);
if (ret)
SDE_ERROR("set_property failed\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
index babf88335e49..c4bb7b08f817 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.h
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,18 +15,43 @@
#include "msm_kms.h"
#include "msm_mmu.h"
+#include "sde_hw_mdss.h"
enum splash_connector_type {
SPLASH_DSI = 0,
SPLASH_HDMI,
};
+struct splash_lm_hw {
+ u8 lm_id;
+ u8 ctl_id;
+ u32 lm_reg_value;
+};
+
+struct splash_ctl_top {
+ u32 value;
+ u8 intf_sel;
+ u8 ctl_lm_cnt;
+ struct splash_lm_hw lm[LM_MAX - LM_0];
+};
+
+struct sde_res_data {
+ struct splash_ctl_top top[CTL_MAX - CTL_0];
+ u8 ctl_ids[CTL_MAX - CTL_0];
+ u8 lm_ids[LM_MAX - LM_0];
+ u8 ctl_top_cnt;
+ u8 lm_cnt;
+};
+
struct sde_splash_info {
/* handoff flag */
bool handoff;
- /* flag of display scratch registers */
- bool program_scratch_regs;
+ /* current hw configuration */
+ struct sde_res_data res;
+
+ /* flag of display splash status */
+ bool display_splash_enabled;
/* to indicate LK is totally exited */
bool lk_is_exited;
@@ -49,11 +74,20 @@ struct sde_splash_info {
/* memory size of lk pool */
size_t lk_pool_size;
+ /* enabled status of displays */
+ uint32_t intf_sel_status;
+
+ /* DSI split enabled flag */
+ bool split_is_enabled;
+
/* registered hdmi connector count */
uint32_t hdmi_connector_cnt;
/* registered dsi connector count */
uint32_t dsi_connector_cnt;
+
+ /* reserved pipe info for early RVC */
+ uint32_t reserved_pipe_info[MAX_BLOCKS];
};
/* APIs for early splash handoff functions */
@@ -82,28 +116,46 @@ void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
int connector_type);
/**
- * sde_splash_clean_up_exit_lk.
+ * sde_splash_lk_stop_splash.
*
- * Tell LK to exit, and clean up the resource.
+ * Tell LK to stop the display splash once a valid user commit arrives.
*/
-int sde_splash_clean_up_exit_lk(struct msm_kms *kms);
+int sde_splash_lk_stop_splash(struct msm_kms *kms,
+ struct drm_atomic_state *state);
/**
- * sde_splash_clean_up_free_resource.
+ * sde_splash_free_resource.
*
- * According to input connector_type, free
- * HDMI's and DSI's resource respectively.
+ * Free all of LK's resources: return the reserved memory to the
+ * system, withdraw the data bus vote, disable MDP core power and
+ * send a uevent so user space can recycle the reserved pipes.
*/
-int sde_splash_clean_up_free_resource(struct msm_kms *kms,
- struct sde_power_handle *phandle,
- int connector_type, void *display);
+int sde_splash_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle,
+ int connector_type, void *display);
/**
- * sde_splash_parse_dt.
+ * sde_splash_parse_memory_dt.
*
* Parse reserved memory block from DT for early splash.
*/
-int sde_splash_parse_dt(struct drm_device *dev);
+int sde_splash_parse_memory_dt(struct drm_device *dev);
+
+/**
+ * sde_splash_parse_reserved_plane_dt
+ *
+ * Parse reserved plane information from DT for the early RVC case.
+ */
+int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info,
+ struct sde_mdss_cfg *cfg);
+
+/**
+ * sde_splash_query_plane_is_reserved
+ *
+ * Query whether a plane is reserved in DT.
+ */
+bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo,
+ uint32_t pipe);
/**
* sde_splash_smmu_map.
@@ -127,6 +179,13 @@ void sde_splash_destroy(struct sde_splash_info *sinfo,
*
* Get LK's status to check if it has been stopped.
*/
-bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo);
+bool sde_splash_get_lk_complete_status(struct msm_kms *kms);
+/**
+ * sde_splash_setup_display_resource
+ *
+ * Set up display resources based on the connector type.
+ */
+int sde_splash_setup_display_resource(struct sde_splash_info *sinfo,
+ void *disp, int connector_type);
#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 2a4e6b59a08c..d28562eabccb 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -125,6 +125,22 @@ TRACE_EVENT(sde_cmd_release_bw,
TP_printk("crtc:%d", __entry->crtc_id)
);
+TRACE_EVENT(sde_encoder_underrun,
+ TP_PROTO(u32 enc_id, u32 underrun_cnt),
+ TP_ARGS(enc_id, underrun_cnt),
+ TP_STRUCT__entry(
+ __field(u32, enc_id)
+ __field(u32, underrun_cnt)
+ ),
+ TP_fast_assign(
+ __entry->enc_id = enc_id;
+ __entry->underrun_cnt = underrun_cnt;
+
+ ),
+ TP_printk("enc:%d underrun_cnt:%d", __entry->enc_id,
+ __entry->underrun_cnt)
+);
+
TRACE_EVENT(sde_mark_write,
TP_PROTO(int pid, const char *name, bool trace_begin),
TP_ARGS(pid, name, trace_begin),
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index ea133619cf62..93c4c1e27b0d 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,9 @@
/* print debug ranges in groups of 4 u32s */
#define REG_DUMP_ALIGN 16
+#define DBG_CTRL_STOP_FTRACE BIT(0)
+#define DBG_CTRL_PANIC_UNDERRUN BIT(1)
+#define DBG_CTRL_MAX BIT(2)
/**
* struct sde_dbg_reg_offset - tracking for start and end of region
@@ -162,6 +165,7 @@ struct sde_dbg_vbif_debug_bus {
* @enable_reg_dump: whether to dump registers into memory, kernel log, or both
* @dbgbus_sde: debug bus structure for the sde
* @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ * @dump_all: dump all entries in the event log
*/
static struct sde_dbg_base {
struct sde_dbg_evtlog *evtlog;
@@ -180,6 +184,8 @@ static struct sde_dbg_base {
struct sde_dbg_sde_debug_bus dbgbus_sde;
struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
+ bool dump_all;
+ u32 debugfs_ctrl;
} sde_dbg_base;
/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
@@ -1448,7 +1454,7 @@ static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus)
*/
static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde,
- bool dump_dbgbus_vbif_rt)
+ bool dump_dbgbus_vbif_rt, bool dump_all)
{
int i;
@@ -1460,7 +1466,8 @@ static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
sde_dbg_base.enable_reg_dump);
}
- sde_evtlog_dump_all(sde_dbg_base.evtlog);
+ if (dump_all)
+ sde_evtlog_dump_all(sde_dbg_base.evtlog);
if (dump_dbgbus_sde)
_sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde);
@@ -1484,7 +1491,8 @@ static void _sde_dump_work(struct work_struct *work)
ARRAY_SIZE(sde_dbg_base.req_dump_blks),
sde_dbg_base.work_panic, "evtlog_workitem",
sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work,
- sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+ sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work,
+ sde_dbg_base.dump_all);
}
void sde_dbg_dump(bool queue_work, const char *name, ...)
@@ -1493,6 +1501,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
bool do_panic = false;
bool dump_dbgbus_sde = false;
bool dump_dbgbus_vbif_rt = false;
+ bool dump_all = false;
va_list args;
char *blk_name = NULL;
struct sde_dbg_reg_base *blk_base = NULL;
@@ -1510,6 +1519,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
memset(sde_dbg_base.req_dump_blks, 0,
sizeof(sde_dbg_base.req_dump_blks));
+ sde_dbg_base.dump_all = false;
va_start(args, name);
i = 0;
@@ -1531,6 +1541,8 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
blk_name);
}
}
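+ /* "all" additionally requests a full event log dump */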
+ if (!strcmp(blk_name, "all"))
+ dump_all = true;
if (!strcmp(blk_name, "dbg_bus"))
dump_dbgbus_sde = true;
@@ -1550,13 +1562,53 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
dump_dbgbus_sde;
sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
dump_dbgbus_vbif_rt;
+ sde_dbg_base.dump_all = dump_all;
schedule_work(&sde_dbg_base.dump_work);
} else {
_sde_dump_array(blk_arr, blk_len, do_panic, name,
- dump_dbgbus_sde, dump_dbgbus_vbif_rt);
+ dump_dbgbus_sde, dump_dbgbus_vbif_rt, dump_all);
}
}
+void sde_dbg_ctrl(const char *name, ...)
+{
+ int i = 0;
+ va_list args;
+ char *blk_name = NULL;
+
+
+ /* no debugfs controlled events are enabled, just return */
+ if (!sde_dbg_base.debugfs_ctrl)
+ return;
+
+ va_start(args, name);
+
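+ /* walk the NULL-terminated list of event names from the caller */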
+ while ((blk_name = va_arg(args, char*))) {
+ if (i++ >= SDE_EVTLOG_MAX_DATA) {
+ pr_err("could not parse all dbg arguments\n");
+ break;
+ }
+
+ if (IS_ERR_OR_NULL(blk_name))
+ break;
+
+ if (!strcmp(blk_name, "stop_ftrace") &&
+ sde_dbg_base.debugfs_ctrl &
+ DBG_CTRL_STOP_FTRACE) {
+ pr_debug("tracing off\n");
+ tracing_off();
+ }
+
+ if (!strcmp(blk_name, "panic_underrun") &&
+ sde_dbg_base.debugfs_ctrl &
+ DBG_CTRL_PANIC_UNDERRUN) {
+ pr_debug("panic underrun\n");
+ panic("underrun");
+ }
+ }
+
+}
+
/*
* sde_dbg_debugfs_open - debugfs open handler for evtlog dump
* @inode: debugfs inode
@@ -1564,6 +1616,9 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
*/
static int sde_dbg_debugfs_open(struct inode *inode, struct file *file)
{
+ if (!inode || !file)
+ return -EINVAL;
+
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = inode->i_private;
@@ -1583,8 +1638,16 @@ static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
ssize_t len = 0;
char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+ if (!buff || !ppos)
+ return -EINVAL;
+
len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf,
- SDE_EVTLOG_BUF_MAX);
+ SDE_EVTLOG_BUF_MAX, true);
+ if (len < 0 || len > count) {
+ pr_err("len is more than user buffer size\n");
+ return 0;
+ }
+
if (copy_to_user(buff, evtlog_buf, len))
return -EFAULT;
*ppos += len;
@@ -1621,6 +1684,82 @@ static const struct file_operations sde_evtlog_fops = {
.write = sde_evtlog_dump_write,
};
+/**
+ * sde_dbg_ctrl_read - debugfs read handler for debug ctrl
+ * @file: file handler
+ * @buff: user buffer content for debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ ssize_t len = 0;
+ char buf[24] = {'\0'};
+
+ if (!buff || !ppos)
+ return -EINVAL;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "0x%x\n", sde_dbg_base.debugfs_ctrl);
+ pr_debug("%s: ctrl:0x%x len:0x%zx\n",
+ __func__, sde_dbg_base.debugfs_ctrl, len);
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+ pr_err("error copying the buffer! count:0x%zx\n", count);
+ return -EFAULT;
+ }
+
+ *ppos += len; /* increase offset */
+ return len;
+}
+
+/**
+ * sde_dbg_ctrl_write - debugfs write handler for debug ctrl
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ u32 dbg_ctrl = 0;
+ char buf[24];
+
+ if (!file) {
+ pr_err("DbgDbg: %s: error no file --\n", __func__);
+ return -EINVAL;
+ }
+
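+ /* leave room for the NUL terminator appended below */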
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtouint(buf, 0, &dbg_ctrl)) {
+ pr_err("%s: failed to parse the control value\n", __func__);
+ return -EFAULT;
+ }
+
+ pr_debug("dbg_ctrl:0x%x\n", dbg_ctrl);
+ sde_dbg_base.debugfs_ctrl = dbg_ctrl;
+
+ return count;
+}
+
+static const struct file_operations sde_dbg_ctrl_fops = {
+ .open = sde_dbg_debugfs_open,
+ .read = sde_dbg_ctrl_read,
+ .write = sde_dbg_ctrl_write,
+};
+
void sde_dbg_init_dbg_buses(u32 hwversion)
{
static struct sde_dbg_base *dbg = &sde_dbg_base;
@@ -1695,6 +1834,8 @@ int sde_dbg_init(struct dentry *debugfs_root, struct device *dev,
for (i = 0; i < SDE_EVTLOG_ENTRY; i++)
sde_dbg_base.evtlog->logs[i].counter = i;
+ debugfs_create_file("dbg_ctrl", 0600, sde_dbg_base.root, NULL,
+ &sde_dbg_ctrl_fops);
debugfs_create_file("dump", 0600, sde_dbg_base.root, NULL,
&sde_evtlog_fops);
debugfs_create_u32("enable", 0600, sde_dbg_base.root,
@@ -1736,7 +1877,14 @@ void sde_dbg_destroy(void)
*/
static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
{
- struct sde_dbg_reg_base *dbg = file->private_data;
+ struct sde_dbg_reg_base *dbg;
+
+ if (!file)
+ return -EINVAL;
+
+ dbg = file->private_data;
+ if (!dbg)
+ return -ENODEV;
mutex_lock(&sde_dbg_base.mutex);
if (dbg && dbg->buf) {
@@ -1760,12 +1908,16 @@ static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
- struct sde_dbg_reg_base *dbg = file->private_data;
+ struct sde_dbg_reg_base *dbg;
u32 off = 0;
u32 cnt = DEFAULT_BASE_REG_CNT;
char buf[24];
ssize_t rc = count;
+ if (!file)
+ return -EINVAL;
+
+ dbg = file->private_data;
if (!dbg)
return -ENODEV;
@@ -1799,6 +1951,9 @@ static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
goto exit;
}
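+ /* a zero register count would leave nothing to dump */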
+ if (cnt == 0)
+ return -EINVAL;
+
dbg->off = off;
dbg->cnt = cnt;
@@ -1819,17 +1974,29 @@ exit:
static ssize_t sde_dbg_reg_base_offset_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
- struct sde_dbg_reg_base *dbg = file->private_data;
+ struct sde_dbg_reg_base *dbg;
int len = 0;
char buf[24] = {'\0'};
+ if (!file)
+ return -EINVAL;
+
+ dbg = file->private_data;
if (!dbg)
return -ENODEV;
+ if (!ppos)
+ return -EINVAL;
+
if (*ppos)
return 0; /* the end */
mutex_lock(&sde_dbg_base.mutex);
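+ /* the stored offset must be u32-aligned */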
+ if (dbg->off % sizeof(u32)) {
+ mutex_unlock(&sde_dbg_base.mutex);
+ return -EFAULT;
+ }
+
len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
if (len < 0 || len >= sizeof(buf)) {
mutex_unlock(&sde_dbg_base.mutex);
@@ -1857,11 +2024,15 @@ static ssize_t sde_dbg_reg_base_offset_read(struct file *file,
static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
- struct sde_dbg_reg_base *dbg = file->private_data;
+ struct sde_dbg_reg_base *dbg;
size_t off;
u32 data, cnt;
char buf[24];
+ if (!file)
+ return -EINVAL;
+
+ dbg = file->private_data;
if (!dbg)
return -ENODEV;
@@ -1907,14 +2078,21 @@ static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
- struct sde_dbg_reg_base *dbg = file->private_data;
+ struct sde_dbg_reg_base *dbg;
size_t len;
+ if (!file)
+ return -EINVAL;
+
+ dbg = file->private_data;
if (!dbg) {
pr_err("invalid handle\n");
return -ENODEV;
}
+ if (!ppos)
+ return -EINVAL;
+
mutex_lock(&sde_dbg_base.mutex);
if (!dbg->buf) {
char *hwbuf;
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 74fd4c94b490..ce36cba08039 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,9 +17,10 @@
#include <linux/debugfs.h>
#include <linux/list.h>
-#define SDE_EVTLOG_DATA_LIMITER (-1)
+#define SDE_EVTLOG_DATA_LIMITER (0xC0DEBEEF)
#define SDE_EVTLOG_FUNC_ENTRY 0x1111
#define SDE_EVTLOG_FUNC_EXIT 0x2222
+#define SDE_EVTLOG_ERROR 0xebad
#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
@@ -52,7 +53,7 @@ enum sde_dbg_dump_flag {
* number must be greater than print entry to prevent out of bound evtlog
* entry array access.
*/
-#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 4)
+#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 8)
#define SDE_EVTLOG_MAX_DATA 15
#define SDE_EVTLOG_BUF_MAX 512
#define SDE_EVTLOG_BUF_ALIGN 32
@@ -77,6 +78,7 @@ struct sde_dbg_evtlog {
struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY];
u32 first;
u32 last;
+ u32 last_dump;
u32 curr;
u32 next;
u32 enable;
@@ -123,6 +125,13 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
SDE_DBG_DUMP_DATA_LIMITER)
+/**
+ * SDE_DBG_CTRL - trigger different driver events
+ * event: event that triggers different behavior in the driver
+ */
+#define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \
+ SDE_DBG_DUMP_DATA_LIMITER)
+
#if defined(CONFIG_DEBUG_FS)
/**
@@ -172,10 +181,12 @@ bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag);
* @evtlog: pointer to evtlog
* @evtlog_buf: target buffer to print into
* @evtlog_buf_size: size of target buffer
+ * @update_last_entry: whether or not to stop at the most recent entry
* Returns: number of bytes written to buffer
*/
ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
- char *evtlog_buf, ssize_t evtlog_buf_size);
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry);
/**
* sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
@@ -214,6 +225,15 @@ void sde_dbg_destroy(void);
void sde_dbg_dump(bool queue_work, const char *name, ...);
/**
+ * sde_dbg_ctrl - trigger specific driver actions for debugging purposes.
+ * Each action must be enabled through the debugfs entry before the
+ * driver executes it at the corresponding call sites.
+ * @va_args: list of actions to trigger
+ * Returns: none
+ */
+void sde_dbg_ctrl(const char *name, ...);
+
+/**
* sde_dbg_reg_register_base - register a hw register address section for later
* dumping. call this before calling sde_dbg_reg_register_dump_range
* to be able to specify sub-ranges within the base hw range.
@@ -272,7 +292,8 @@ static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog,
}
static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
- char *evtlog_buf, ssize_t evtlog_buf_size)
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry)
{
return 0;
}
@@ -295,6 +316,10 @@ static inline void sde_dbg_dump(bool queue_work, const char *name, ...)
{
}
+static inline void sde_dbg_ctrl(const char *name, ...)
+{
+}
+
static inline int sde_dbg_reg_register_base(const char *name,
void __iomem *base, size_t max_offset)
{
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 759bdab48840..70ba127ceb08 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,7 +75,8 @@ void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
}
/* always dump the last entries which are not dumped yet */
-static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog)
+static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
+ bool update_last_entry)
{
bool need_dump = true;
unsigned long flags;
@@ -87,21 +88,26 @@ static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog)
evtlog->first = evtlog->next;
- if (evtlog->last == evtlog->first) {
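+ /*
+ * Snapshot the end marker on request; subsequent chunks reuse it
+ * so a multi-part dump stops at a fixed entry.
+ */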
+ if (update_last_entry)
+ evtlog->last_dump = evtlog->last;
+
+ if (evtlog->last_dump == evtlog->first) {
need_dump = false;
goto dump_exit;
}
- if (evtlog->last < evtlog->first) {
+ if (evtlog->last_dump < evtlog->first) {
evtlog->first %= SDE_EVTLOG_ENTRY;
- if (evtlog->last < evtlog->first)
- evtlog->last += SDE_EVTLOG_ENTRY;
+ if (evtlog->last_dump < evtlog->first)
+ evtlog->last_dump += SDE_EVTLOG_ENTRY;
}
- if ((evtlog->last - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
- pr_warn("evtlog buffer overflow before dump: %d\n",
- evtlog->last - evtlog->first);
- evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY;
+ if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+ pr_info("evtlog skipping %d entries, last=%d\n",
+ evtlog->last_dump - evtlog->first -
+ SDE_EVTLOG_PRINT_ENTRY,
+ evtlog->last_dump - 1);
+ evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY;
}
evtlog->next = evtlog->first + 1;
@@ -112,7 +118,8 @@ dump_exit:
}
ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
- char *evtlog_buf, ssize_t evtlog_buf_size)
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry)
{
int i;
ssize_t off = 0;
@@ -123,7 +130,7 @@ ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
return 0;
/* update markers, exit if nothing to print */
- if (!_sde_evtlog_dump_calc_range(evtlog))
+ if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry))
return 0;
spin_lock_irqsave(&evtlog->spin_lock, flags);
@@ -159,12 +166,16 @@ ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
{
char buf[SDE_EVTLOG_BUF_MAX];
+ bool update_last_entry = true;
if (!evtlog)
return;
- while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf)))
+ while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf),
+ update_last_entry)) {
pr_info("%s", buf);
+ update_last_entry = false;
+ }
}
struct sde_dbg_evtlog *sde_evtlog_init(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2a5ed7460354..ababdaabe870 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -253,9 +253,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
- return conn_status;
+ /* Outputs are only polled while runtime active, so acquiring a
+ * runtime PM ref here is unnecessary (and would deadlock upon
+ * runtime suspend because it waits for polling to finish).
+ */
+ if (!drm_kms_helper_is_poll_worker()) {
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return conn_status;
+ }
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -323,8 +329,10 @@ detect_analog:
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return conn_status;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 00de1bf81519..9dfc2471ea09 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -104,7 +104,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
};
struct nouveau_display *disp = nouveau_display(crtc->dev);
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
- int ret, retry = 1;
+ int ret, retry = 20;
do {
ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a0865c49ec83..495c279da200 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -370,7 +370,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_device *dev = chan->drm->dev;
int trycnt = 0;
- int ret, i;
+ int ret = -EINVAL, i;
struct nouveau_bo *res_bo = NULL;
LIST_HEAD(gart_list);
LIST_HEAD(vram_list);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 4896474da320..3021fcd0a3df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -127,6 +127,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
return ret;
pci->irq = pdev->irq;
+
+ /* Ensure MSI interrupts are armed, for the case where there are
+ * already interrupts pending (for whatever reason) at load time.
+ */
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
+
return ret;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f516b5891932..083db3f5181f 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -288,7 +288,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
+ goto cleanup;
}
+
+ /* Check the engine status before continuing */
+ ret = wait_status(engine, DMM_PATSTATUS_READY |
+ DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
}
cleanup:
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 7ed08fdc4c42..393e5335e33b 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -158,7 +158,7 @@ static void evict_entry(struct drm_gem_object *obj,
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
if (m > 1) {
int i;
@@ -415,7 +415,7 @@ static int fault_2d(struct drm_gem_object *obj,
* into account in some of the math, so figure out virtual stride
* in pages
*/
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = ((unsigned long)vmf->virtual_address -
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index c4a552637c93..3ff7689835dc 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -494,9 +494,11 @@ static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
int qxl_fbdev_init(struct qxl_device *qdev)
{
+ int ret = 0;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct qxl_fbdev *qfbdev;
int bpp_sel = 32; /* TODO: parameter from somewhere? */
- int ret;
qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
if (!qfbdev)
@@ -531,6 +533,8 @@ fini:
drm_fb_helper_fini(&qfbdev->helper);
free:
kfree(qfbdev);
+#endif
+
return ret;
}
@@ -546,6 +550,9 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
+ if (!qdev->mode_info.qfbdev)
+ return;
+
drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 134874cab4c7..80b6d6e4721a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3599,35 +3599,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
case CHIP_KAVERI:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 4;
- if ((rdev->pdev->device == 0x1304) ||
- (rdev->pdev->device == 0x1305) ||
- (rdev->pdev->device == 0x130C) ||
- (rdev->pdev->device == 0x130F) ||
- (rdev->pdev->device == 0x1310) ||
- (rdev->pdev->device == 0x1311) ||
- (rdev->pdev->device == 0x131C)) {
- rdev->config.cik.max_cu_per_sh = 8;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1309) ||
- (rdev->pdev->device == 0x130A) ||
- (rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313) ||
- (rdev->pdev->device == 0x131D)) {
- rdev->config.cik.max_cu_per_sh = 6;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1306) ||
- (rdev->pdev->device == 0x1307) ||
- (rdev->pdev->device == 0x130B) ||
- (rdev->pdev->device == 0x130E) ||
- (rdev->pdev->device == 0x1315) ||
- (rdev->pdev->device == 0x1318) ||
- (rdev->pdev->device == 0x131B)) {
- rdev->config.cik.max_cu_per_sh = 4;
- rdev->config.cik.max_backends_per_se = 1;
- } else {
- rdev->config.cik.max_cu_per_sh = 3;
- rdev->config.cik.max_backends_per_se = 1;
- }
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
rdev->config.cik.max_sh_per_se = 1;
rdev->config.cik.max_texture_channel_caches = 4;
rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 30f00748ed37..1a2a7365d0b5 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -89,25 +89,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (radeon_dp_needs_link_train(radeon_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (!radeon_dp_getdpcd(radeon_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -891,9 +884,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -916,8 +911,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1020,9 +1019,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1089,8 +1090,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1153,9 +1156,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->dac_load_detect)
return ret;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1167,8 +1172,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1230,9 +1239,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (radeon_connector->detected_hpd_without_ddc) {
force = true;
@@ -1415,8 +1426,10 @@ out:
}
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1666,9 +1679,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dig_connector->is_mst)
return connector_status_disconnected;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1755,8 +1770,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3645b223aa37..446d99062306 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1374,6 +1374,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Imported dma-bufs cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fb6ad143873f..83aee9e814ba 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -238,9 +238,10 @@ int radeon_bo_create(struct radeon_device *rdev,
* may be slow
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758
*/
-
+#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
+#endif
if (bo->flags & RADEON_GEM_GTT_WC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6edcb5485092..b35ebabd6a9f 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -946,7 +946,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
/* calc dclk divider with current vco freq */
dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
pd_min, pd_even);
- if (vclk_div > pd_max)
+ if (dclk_div > pd_max)
break; /* vco is too big, it has to stop */
/* calc score with current vco freq */
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d9007cc37be1..892d0a71d766 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5964,9 +5964,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
{
u32 lane_width;
u32 new_lane_width =
- (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
radeon_set_pcie_lanes(rdev, new_lane_width);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 9befd624a5f0..6fab07935d16 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -371,6 +371,31 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
rcrtc->started = true;
}
+static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
+{
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct drm_crtc *crtc = &rcrtc->crtc;
+ u32 status;
+ /* Make sure vblank interrupts are enabled. */
+ drm_crtc_vblank_get(crtc);
+ /*
+ * Disable planes and calculate how many vertical blanking interrupts we
+ * have to wait for. If a vertical blanking interrupt has been triggered
+ * but not processed yet, we don't know whether it occurred before or
+ * after the planes got disabled. We thus have to wait for two vblank
+ * interrupts in that case.
+ */
+ spin_lock_irq(&rcrtc->vblank_lock);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
+ status = rcar_du_crtc_read(rcrtc, DSSR);
+ rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
+ spin_unlock_irq(&rcrtc->vblank_lock);
+ if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
+ msecs_to_jiffies(100)))
+ dev_warn(rcdu->dev, "vertical blanking timeout\n");
+ drm_crtc_vblank_put(crtc);
+}
+
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
@@ -379,17 +404,16 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
return;
/* Disable all planes and wait for the change to take effect. This is
- * required as the DSnPR registers are updated on vblank, and no vblank
- * will occur once the CRTC is stopped. Disabling planes when starting
- * the CRTC thus wouldn't be enough as it would start scanning out
- * immediately from old frame buffers until the next vblank.
+ * required as the plane enable registers are updated on vblank, and no
+ * vblank will occur once the CRTC is stopped. Disabling planes when
+ * starting the CRTC thus wouldn't be enough as it would start scanning
+ * out immediately from old frame buffers until the next vblank.
*
* This increases the CRTC stop delay, especially when multiple CRTCs
* are stopped in one operation as we now wait for one vblank per CRTC.
* Whether this can be improved needs to be researched.
*/
- rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
- drm_crtc_wait_one_vblank(crtc);
+ rcar_du_crtc_disable_planes(rcrtc);
/* Disable vertical blanking interrupt reporting. We first need to wait
* for page flip completion before stopping the CRTC as userspace
@@ -528,10 +552,26 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
u32 status;
+ spin_lock(&rcrtc->vblank_lock);
+
status = rcar_du_crtc_read(rcrtc, DSSR);
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
- if (status & DSSR_FRM) {
+ if (status & DSSR_VBK) {
+ /*
+ * Wake up the vblank wait if the counter reaches 0. This must
+ * be protected by the vblank_lock to avoid races in
+ * rcar_du_crtc_disable_planes().
+ */
+ if (rcrtc->vblank_count) {
+ if (--rcrtc->vblank_count == 0)
+ wake_up(&rcrtc->vblank_wait);
+ }
+ }
+
+ spin_unlock(&rcrtc->vblank_lock);
+
+ if (status & DSSR_VBK) {
drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
@@ -585,6 +625,8 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
}
init_waitqueue_head(&rcrtc->flip_wait);
+ init_waitqueue_head(&rcrtc->vblank_wait);
+ spin_lock_init(&rcrtc->vblank_lock);
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 2bbe3f5aab65..be22ce33b70a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,6 +15,7 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
#include <drm/drmP.h>
@@ -32,6 +33,9 @@ struct rcar_du_group;
* @started: whether the CRTC has been started and is running
* @event: event to post when the pending page flip completes
* @flip_wait: wait queue used to signal page flip completion
+ * @vblank_lock: protects vblank_wait and vblank_count
+ * @vblank_wait: wait queue used to signal vertical blanking
+ * @vblank_count: number of vertical blanking interrupts to wait for
* @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
* @enabled: whether the CRTC is enabled, used to control system resume
* @group: CRTC group this CRTC belongs to
@@ -48,6 +52,10 @@ struct rcar_du_crtc {
struct drm_pending_vblank_event *event;
wait_queue_head_t flip_wait;
+ spinlock_t vblank_lock;
+ wait_queue_head_t vblank_wait;
+ unsigned int vblank_count;
+
unsigned int outputs;
bool enabled;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 5d8dfe027b30..75d51ec98e06 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -818,6 +818,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 73e41a8613da..29bd801f5dad 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -256,10 +256,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long offset;
unsigned long page, pos;
- if (offset + size > info->fix.smem_len)
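+ /* reject page offsets that would overflow when shifted into bytes */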
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
return -EINVAL;
pos = (unsigned long)info->fix.smem_start + offset;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b40ed6061f05..7f898cfdc746 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -118,7 +118,7 @@ static const struct file_operations virtio_gpu_driver_fops = {
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
.set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4a74129c5708..7b6e5c5e7284 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -68,10 +68,17 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
struct virtio_gpu_object *bo;
uint32_t handle;
- if (plane->fb) {
- vgfb = to_virtio_gpu_framebuffer(plane->fb);
+ if (plane->state->fb) {
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->obj);
handle = bo->hw_res_handle;
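+ /*
+ * Dumb buffers are filled by the guest CPU, so transfer their
+ * contents to the host before scanout.
+ */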
+ if (bo->dumb) {
+ virtio_gpu_cmd_transfer_to_host_2d
+ (vgdev, handle, 0,
+ cpu_to_le32(plane->state->crtc_w),
+ cpu_to_le32(plane->state->crtc_h),
+ plane->state->crtc_x, plane->state->crtc_y, NULL);
+ }
} else {
handle = 0;
}
@@ -84,6 +91,11 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
plane->state->crtc_h,
plane->state->crtc_x,
plane->state->crtc_y);
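+ /* flush the updated region so the host presents the new contents */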
+ virtio_gpu_cmd_resource_flush(vgdev, handle,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..52436b3c01bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
@@ -399,7 +399,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->cursorq.qlock);
- wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+ wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 67cebb23c940..aa04fb0159a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
struct vmw_cmdbuf_man *man = header->man;
u32 val;
- if (sizeof(header->handle) > 4)
- val = (header->handle >> 32);
- else
- val = 0;
+ val = upper_32_bits(header->handle);
vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
- val = (header->handle & 0xFFFFFFFFULL);
+ val = lower_32_bits(header->handle);
val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d2d93959b119..aec6e9eef489 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -433,7 +433,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
set.y = 0;
set.mode = NULL;
set.fb = NULL;
- set.num_connectors = 1;
+ set.num_connectors = 0;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret) {
@@ -821,7 +821,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
flush_delayed_work(&par->local_work);
mutex_lock(&par->bo_mutex);
+ drm_modeset_lock_all(vmw_priv->dev);
(void) vmw_fb_kms_detach(par, true, false);
+ drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 060e5c6f4446..9b97f70fbb3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -27,7 +27,6 @@
#include "vmwgfx_kms.h"
-
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -1910,9 +1909,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_resource_prepare.
*/
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{
- vmw_kms_helper_buffer_revert(res->backup);
+ struct vmw_resource *res = ctx->res;
+
+ vmw_kms_helper_buffer_revert(ctx->buf);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -1929,10 +1931,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
* interrupted by a signal.
*/
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible)
+ bool interruptible,
+ struct vmw_validation_ctx *ctx)
{
int ret = 0;
+ ctx->buf = NULL;
+ ctx->res = res;
+
if (interruptible)
ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
else
@@ -1951,6 +1957,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
res->dev_priv->has_mob);
if (ret)
goto out_unreserve;
+
+ ctx->buf = vmw_dmabuf_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
@@ -1958,7 +1966,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
return 0;
out_revert:
- vmw_kms_helper_buffer_revert(res->backup);
+ vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve:
vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
@@ -1974,13 +1982,16 @@ out_unlock:
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here.
*/
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
- struct vmw_fence_obj **out_fence)
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+ struct vmw_fence_obj **out_fence)
{
- if (res->backup || out_fence)
- vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+ struct vmw_resource *res = ctx->res;
+
+ if (ctx->buf || out_fence)
+ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index edd81503516d..63b05d5ee50a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -180,6 +180,11 @@ struct vmw_display_unit {
bool is_implicit;
};
+struct vmw_validation_ctx {
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *buf;
+};
+
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
#define vmw_connector_to_du(x) \
@@ -230,9 +235,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_vmw_fence_rep __user *
user_fence_rep);
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ bool interruptible,
+ struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
struct vmw_fence_obj **out_fence);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 13926ff192e3..f50fcd213413 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -841,12 +841,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty;
+ struct vmw_validation_ctx ctx;
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true);
+ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret)
return ret;
@@ -865,7 +866,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
- vmw_kms_helper_resource_finish(srf, out_fence);
+ vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f823fc3efed7..3184a9ae22c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1003,12 +1003,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty;
+ struct vmw_validation_ctx ctx;
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true);
+ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret)
return ret;
@@ -1031,7 +1032,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
out_finish:
- vmw_kms_helper_resource_finish(srf, out_fence);
+ vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret;
}
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 4ec04001ae7e..bc3f794555a5 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -300,6 +300,22 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.busy_mask = 0xFFFFFFFE,
},
{
+ .gpurev = ADRENO_REV_A509,
+ .core = 5,
+ .major = 0,
+ .minor = 9,
+ .patchid = ANY_ID,
+ .features = ADRENO_PREEMPTION | ADRENO_64BIT |
+ ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION,
+ .pm4fw_name = "a530_pm4.fw",
+ .pfpfw_name = "a530_pfp.fw",
+ .zap_name = "a512_zap",
+ .gpudev = &adreno_a5xx_gpudev,
+ .gmem_size = (SZ_256K + SZ_16K),
+ .num_protected_regs = 0x20,
+ .busy_mask = 0xFFFFFFFE,
+ },
+ {
.gpurev = ADRENO_REV_A508,
.core = 5,
.major = 0,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index f96a7a2cee21..7af2af483f10 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2839,6 +2839,8 @@ static const struct kgsl_functable adreno_functable = {
.clk_set_options = adreno_clk_set_options,
.gpu_model = adreno_gpu_model,
.stop_fault_timer = adreno_dispatcher_stop_fault_timer,
+ .dispatcher_halt = adreno_dispatcher_halt,
+ .dispatcher_unhalt = adreno_dispatcher_unhalt,
};
static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 305163147c1a..9ea50007ec38 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -170,6 +170,7 @@ enum adreno_gpurev {
ADRENO_REV_A505 = 505,
ADRENO_REV_A506 = 506,
ADRENO_REV_A508 = 508,
+ ADRENO_REV_A509 = 509,
ADRENO_REV_A510 = 510,
ADRENO_REV_A512 = 512,
ADRENO_REV_A530 = 530,
@@ -1007,6 +1008,7 @@ static inline int adreno_is_a5xx(struct adreno_device *adreno_dev)
ADRENO_TARGET(a505, ADRENO_REV_A505)
ADRENO_TARGET(a506, ADRENO_REV_A506)
ADRENO_TARGET(a508, ADRENO_REV_A508)
+ADRENO_TARGET(a509, ADRENO_REV_A509)
ADRENO_TARGET(a510, ADRENO_REV_A510)
ADRENO_TARGET(a512, ADRENO_REV_A512)
ADRENO_TARGET(a530, ADRENO_REV_A530)
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 2b8c593076cb..4daf1fad6ee1 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -59,6 +59,7 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
{ adreno_is_a530, a530_vbif },
{ adreno_is_a512, a540_vbif },
{ adreno_is_a510, a530_vbif },
+ { adreno_is_a509, a540_vbif },
{ adreno_is_a508, a530_vbif },
{ adreno_is_a505, a530_vbif },
{ adreno_is_a506, a530_vbif },
@@ -161,6 +162,7 @@ static const struct {
{ adreno_is_a530, a530_efuse_speed_bin },
{ adreno_is_a505, a530_efuse_speed_bin },
{ adreno_is_a512, a530_efuse_speed_bin },
+ { adreno_is_a509, a530_efuse_speed_bin },
{ adreno_is_a508, a530_efuse_speed_bin },
};
@@ -201,7 +203,8 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev)
gpudev->vbif_xin_halt_ctrl0_mask =
A510_VBIF_XIN_HALT_CTRL0_MASK;
} else if (adreno_is_a540(adreno_dev) ||
- adreno_is_a512(adreno_dev)) {
+ adreno_is_a512(adreno_dev) ||
+ adreno_is_a509(adreno_dev)) {
gpudev->snapshot_data->sect_sizes->cp_merciu = 1024;
}
@@ -539,7 +542,8 @@ static void a5xx_regulator_disable(struct adreno_device *adreno_dev)
unsigned int reg;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (adreno_is_a512(adreno_dev) || adreno_is_a508(adreno_dev))
+ if (adreno_is_a512(adreno_dev) || adreno_is_a509(adreno_dev) ||
+ adreno_is_a508(adreno_dev))
return;
/* If feature is not supported or not enabled */
@@ -1199,6 +1203,7 @@ static const struct {
{ adreno_is_a540, a540_hwcg_regs, ARRAY_SIZE(a540_hwcg_regs) },
{ adreno_is_a530, a530_hwcg_regs, ARRAY_SIZE(a530_hwcg_regs) },
{ adreno_is_a512, a512_hwcg_regs, ARRAY_SIZE(a512_hwcg_regs) },
+ { adreno_is_a509, a512_hwcg_regs, ARRAY_SIZE(a512_hwcg_regs) },
{ adreno_is_a510, a510_hwcg_regs, ARRAY_SIZE(a510_hwcg_regs) },
{ adreno_is_a505, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
{ adreno_is_a506, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
@@ -1376,31 +1381,27 @@ static int _execute_reg_sequence(struct adreno_device *adreno_dev,
/* todo double check the reg writes */
while ((cur - opcode) < length) {
- switch (cur[0]) {
- /* Write a 32 bit value to a 64 bit reg */
- case 1:
+ if (cur[0] == 1 && (length - (cur - opcode) >= 4)) {
+ /* Write a 32 bit value to a 64 bit reg */
reg = cur[2];
reg = (reg << 32) | cur[1];
kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, cur[3]);
cur += 4;
- break;
- /* Write a 64 bit value to a 64 bit reg */
- case 2:
+ } else if (cur[0] == 2 && (length - (cur - opcode) >= 5)) {
+ /* Write a 64 bit value to a 64 bit reg */
reg = cur[2];
reg = (reg << 32) | cur[1];
val = cur[4];
val = (val << 32) | cur[3];
kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, val);
cur += 5;
- break;
- /* Delay for X usec */
- case 3:
+ } else if (cur[0] == 3 && (length - (cur - opcode) >= 2)) {
+ /* Delay for X usec */
udelay(cur[1]);
cur += 2;
- break;
- default:
+ } else
return -EINVAL;
- } }
+ }
return 0;
}
@@ -1655,7 +1656,7 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
{
if (!adreno_is_a540(adreno_dev) && !adreno_is_a512(adreno_dev) &&
- !adreno_is_a508(adreno_dev))
+ !adreno_is_a508(adreno_dev) && !adreno_is_a509(adreno_dev))
return;
/* Handle clock settings for GFX PSCBCs */
@@ -1961,7 +1962,8 @@ static void a5xx_start(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x20);
kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
- } else if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev)) {
+ } else if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev) ||
+ adreno_is_a509(adreno_dev)) {
kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x40);
kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x400);
kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
@@ -1980,7 +1982,8 @@ static void a5xx_start(struct adreno_device *adreno_dev)
if (adreno_is_a505_or_a506(adreno_dev) || adreno_is_a508(adreno_dev))
kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
(0x100 << 11 | 0x100 << 22));
- else if (adreno_is_a510(adreno_dev) || adreno_is_a512(adreno_dev))
+ else if (adreno_is_a510(adreno_dev) || adreno_is_a512(adreno_dev) ||
+ adreno_is_a509(adreno_dev))
kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
(0x200 << 11 | 0x200 << 22));
else
@@ -2073,7 +2076,8 @@ static void a5xx_start(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_TPL1_MODE_CNTL, bit << 7);
kgsl_regwrite(device, A5XX_RB_MODE_CNTL, bit << 1);
if (adreno_is_a540(adreno_dev) ||
- adreno_is_a512(adreno_dev))
+ adreno_is_a512(adreno_dev) ||
+ adreno_is_a509(adreno_dev))
kgsl_regwrite(device, A5XX_UCHE_DBG_ECO_CNTL_2,
bit);
}
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 49560d704537..fb4cebe2cf40 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -410,6 +410,15 @@ static const unsigned int a5xx_registers[] = {
0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
/* VPC CTX 1 */
0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2,
+};
+
+/*
+ * GPMU registers to dump for A5XX on snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+static const unsigned int a5xx_gpmu_registers[] = {
/* GPMU */
0xA800, 0xA8FF, 0xAC60, 0xAC60,
};
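
The a5xx_gpmu_registers table above follows the same convention as a5xx_registers:
values come in pairs, where the first entry is the start offset and the second is
the stop offset, inclusive. A minimal, self-contained sketch of how such a pair
table expands into individual registers; the array contents mirror the GPMU pair
above, everything else is illustrative:

    #include <stdio.h>

    /* Pairs of {start, stop}; the stop offset is inclusive. */
    static const unsigned int example_pairs[] = {
        0xA800, 0xA8FF,   /* 256 registers */
        0xAC60, 0xAC60,   /* a single register */
    };

    int main(void)
    {
        unsigned int i, total = 0;

        for (i = 0; i < sizeof(example_pairs) / sizeof(example_pairs[0]); i += 2)
            total += (example_pairs[i + 1] - example_pairs[i]) + 1;

        printf("registers to dump: %u\n", total);  /* prints 257 */
        return 0;
    }
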
@@ -662,24 +671,23 @@ static size_t a5xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
}
+struct registers {
+ const unsigned int *regs;
+ size_t size;
+};
+
static size_t a5xx_legacy_snapshot_registers(struct kgsl_device *device,
- u8 *buf, size_t remain)
+ u8 *buf, size_t remain, const unsigned int *regs, size_t size)
{
- struct kgsl_snapshot_registers regs = {
- .regs = a5xx_registers,
- .count = ARRAY_SIZE(a5xx_registers) / 2,
+ struct kgsl_snapshot_registers snapshot_regs = {
+ .regs = regs,
+ .count = size / 2,
};
- return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
+ return kgsl_snapshot_dump_registers(device, buf, remain,
+ &snapshot_regs);
}
-static struct cdregs {
- const unsigned int *regs;
- unsigned int size;
-} _a5xx_cd_registers[] = {
- { a5xx_registers, ARRAY_SIZE(a5xx_registers) },
-};
-
#define REG_PAIR_COUNT(_a, _i) \
(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
@@ -689,11 +697,13 @@ static size_t a5xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
unsigned int *data = (unsigned int *)(buf + sizeof(*header));
unsigned int *src = (unsigned int *) registers.hostptr;
- unsigned int i, j, k;
+ struct registers *regs = (struct registers *)priv;
+ unsigned int j, k;
unsigned int count = 0;
if (crash_dump_valid == false)
- return a5xx_legacy_snapshot_registers(device, buf, remain);
+ return a5xx_legacy_snapshot_registers(device, buf, remain,
+ regs->regs, regs->size);
if (remain < sizeof(*header)) {
SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
@@ -702,24 +712,20 @@ static size_t a5xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
remain -= sizeof(*header);
- for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
- struct cdregs *regs = &_a5xx_cd_registers[i];
+ for (j = 0; j < regs->size / 2; j++) {
+ unsigned int start = regs->regs[2 * j];
+ unsigned int end = regs->regs[(2 * j) + 1];
- for (j = 0; j < regs->size / 2; j++) {
- unsigned int start = regs->regs[2 * j];
- unsigned int end = regs->regs[(2 * j) + 1];
-
- if (remain < ((end - start) + 1) * 8) {
- SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
- goto out;
- }
+ if (remain < ((end - start) + 1) * 8) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ goto out;
+ }
- remain -= ((end - start) + 1) * 8;
+ remain -= ((end - start) + 1) * 8;
- for (k = start; k <= end; k++, count++) {
- *data++ = k;
- *data++ = *src++;
- }
+ for (k = start; k <= end; k++, count++) {
+ *data++ = k;
+ *data++ = *src++;
}
}
@@ -859,6 +865,7 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
unsigned int reg, i;
struct adreno_ringbuffer *rb;
+ struct registers regs;
/* Disable Clock gating temporarily for the debug bus to work */
a5xx_hwcg_set(adreno_dev, false);
@@ -875,8 +882,20 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
/* Try to run the crash dumper */
_a5xx_do_crashdump(device);
- kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
- snapshot, a5xx_snapshot_registers, NULL);
+ regs.regs = a5xx_registers;
+ regs.size = ARRAY_SIZE(a5xx_registers);
+
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+ a5xx_snapshot_registers, &regs);
+
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+ regs.regs = a5xx_gpmu_registers;
+ regs.size = ARRAY_SIZE(a5xx_gpmu_registers);
+
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+ snapshot, a5xx_snapshot_registers, &regs);
+ }
+
/* Dump SP TP HLSQ registers */
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
@@ -1033,17 +1052,23 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
* To save the registers, we need 16 bytes per register pair for the
* script and a dword for each register int the data
*/
- for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
- struct cdregs *regs = &_a5xx_cd_registers[i];
+ /* Each pair needs 16 bytes (2 qwords) */
+ script_size += (ARRAY_SIZE(a5xx_registers) / 2) * 16;
+
+ /* Each register needs a dword in the data */
+ for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++)
+ data_size += REG_PAIR_COUNT(a5xx_registers, j) *
+ sizeof(unsigned int);
+
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
/* Each pair needs 16 bytes (2 qwords) */
- script_size += (regs->size / 2) * 16;
+ script_size += (ARRAY_SIZE(a5xx_gpmu_registers) / 2) * 16;
/* Each register needs a dword in the data */
- for (j = 0; j < regs->size / 2; j++)
- data_size += REG_PAIR_COUNT(regs->regs, j) *
+ for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++)
+ data_size += REG_PAIR_COUNT(a5xx_gpmu_registers, j) *
sizeof(unsigned int);
-
}
/*
@@ -1081,13 +1106,21 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
ptr = (uint64_t *) capturescript.hostptr;
/* For the registers, program a read command for each pair */
- for (i = 0; i < ARRAY_SIZE(_a5xx_cd_registers); i++) {
- struct cdregs *regs = &_a5xx_cd_registers[i];
- for (j = 0; j < regs->size / 2; j++) {
- unsigned int r = REG_PAIR_COUNT(regs->regs, j);
+ for (j = 0; j < ARRAY_SIZE(a5xx_registers) / 2; j++) {
+ unsigned int r = REG_PAIR_COUNT(a5xx_registers, j);
+ *ptr++ = registers.gpuaddr + offset;
+ *ptr++ = (((uint64_t) a5xx_registers[2 * j]) << 44)
+ | r;
+ offset += r * sizeof(unsigned int);
+ }
+
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
+ for (j = 0; j < ARRAY_SIZE(a5xx_gpmu_registers) / 2; j++) {
+ unsigned int r = REG_PAIR_COUNT(a5xx_gpmu_registers, j);
*ptr++ = registers.gpuaddr + offset;
- *ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
+ *ptr++ = (((uint64_t) a5xx_gpmu_registers[2 * j]) << 44)
+ | r;
offset += r * sizeof(unsigned int);
}
}
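
Each capture-script entry programmed in the loops above is a pair of 64-bit words:
the destination address inside the snapshot buffer, then the source register offset
packed into the upper bits (shifted left by 44) with the pair's register count in
the low bits. A small sketch of that packing, using made-up values rather than real
GPU addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dest = 0x100000;     /* illustrative destination address */
        unsigned int reg = 0xA800;    /* first register of a pair */
        unsigned int count = 256;     /* registers in that pair */
        uint64_t entry[2];

        entry[0] = dest;                              /* where to store the data */
        entry[1] = ((uint64_t)reg << 44) | count;     /* what to read, how many */

        printf("0x%llx 0x%llx\n",
               (unsigned long long)entry[0],
               (unsigned long long)entry[1]);
        return 0;
    }
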
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index fc7799722026..6dd9f6040fae 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2816,6 +2816,16 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
return ret;
}
+void adreno_dispatcher_halt(struct kgsl_device *device)
+{
+ adreno_get_gpu_halt(ADRENO_DEVICE(device));
+}
+
+void adreno_dispatcher_unhalt(struct kgsl_device *device)
+{
+ adreno_put_gpu_halt(ADRENO_DEVICE(device));
+}
+
/*
* adreno_dispatcher_idle() - Wait for dispatcher to idle
* @adreno_dev: Adreno device whose dispatcher needs to idle
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 48f0cdc546ff..f55f8cbf928b 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -103,6 +103,9 @@ enum adreno_dispatcher_flags {
};
void adreno_dispatcher_start(struct kgsl_device *device);
+void adreno_dispatcher_halt(struct kgsl_device *device);
+void adreno_dispatcher_unhalt(struct kgsl_device *device);
+
int adreno_dispatcher_init(struct adreno_device *adreno_dev);
void adreno_dispatcher_close(struct adreno_device *adreno_dev);
int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index cc3e79dc29bf..db9e5f7d6d6b 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -530,6 +530,16 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv,
struct kgsl_device *device = dev_priv->device;
char name[64];
int ret = 0, id;
+ struct kgsl_process_private *proc_priv = dev_priv->process_priv;
+
+ if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) {
+ KGSL_DRV_ERR(device,
+ "Per process context limit reached for pid %u",
+ dev_priv->process_priv->pid);
+ return -ENOSPC;
+ }
+
+ atomic_inc(&proc_priv->ctxt_count);
id = _kgsl_get_context_id(device);
if (id == -ENOSPC) {
@@ -548,7 +558,7 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv,
KGSL_DRV_INFO(device,
"cannot have more than %zu contexts due to memstore limitation\n",
KGSL_MEMSTORE_MAX);
-
+ atomic_dec(&proc_priv->ctxt_count);
return id;
}
@@ -579,6 +589,7 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv,
out:
if (ret) {
+ atomic_dec(&proc_priv->ctxt_count);
write_lock(&device->context_lock);
idr_remove(&dev_priv->device->context_idr, id);
write_unlock(&device->context_lock);
@@ -662,6 +673,7 @@ kgsl_context_destroy(struct kref *kref)
device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
}
+ atomic_dec(&context->proc_priv->ctxt_count);
idr_remove(&device->context_idr, context->id);
context->id = KGSL_CONTEXT_INVALID;
}
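
The context accounting added above is a counted-resource pattern: the per-process
counter is bumped before an ID is allocated, and dropped again on every error path
and in kgsl_context_destroy(). A minimal standalone sketch of the same bookkeeping,
with a plain int standing in for the atomic_t; the limit value mirrors
KGSL_MAX_CONTEXTS_PER_PROC from kgsl.h further down, everything else is illustrative:

    #include <stdio.h>

    #define MAX_CONTEXTS_PER_PROC 200   /* mirrors KGSL_MAX_CONTEXTS_PER_PROC */

    static int ctxt_count;              /* stands in for proc_priv->ctxt_count */

    static int create_context(void)
    {
        if (ctxt_count > MAX_CONTEXTS_PER_PROC)
            return -1;                  /* the driver returns -ENOSPC here */
        ctxt_count++;
        /* ... allocate the context; on failure, undo with ctxt_count-- ... */
        return 0;
    }

    static void destroy_context(void)
    {
        ctxt_count--;
    }

    int main(void)
    {
        create_context();
        destroy_context();
        printf("contexts in flight: %d\n", ctxt_count);  /* 0 */
        return 0;
    }
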
@@ -734,6 +746,8 @@ static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
mutex_lock(&device->mutex);
status = kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
+ if (status == 0)
+ device->ftbl->dispatcher_halt(device);
mutex_unlock(&device->mutex);
KGSL_PWR_WARN(device, "suspend end\n");
@@ -748,6 +762,7 @@ static int kgsl_resume_device(struct kgsl_device *device)
KGSL_PWR_WARN(device, "resume start\n");
mutex_lock(&device->mutex);
if (device->state == KGSL_STATE_SUSPEND) {
+ device->ftbl->dispatcher_unhalt(device);
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
} else if (device->state != KGSL_STATE_INIT) {
/*
@@ -3977,6 +3992,7 @@ long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_gpuobj_set_info *param = data;
struct kgsl_mem_entry *entry;
+ int ret = 0;
if (param->id == 0)
return -EINVAL;
@@ -3989,12 +4005,16 @@ long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
copy_metadata(entry, param->metadata, param->metadata_len);
if (param->flags & KGSL_GPUOBJ_SET_INFO_TYPE) {
- entry->memdesc.flags &= ~((uint64_t) KGSL_MEMTYPE_MASK);
- entry->memdesc.flags |= param->type << KGSL_MEMTYPE_SHIFT;
+ if (param->type <= (KGSL_MEMTYPE_MASK >> KGSL_MEMTYPE_SHIFT)) {
+ entry->memdesc.flags &= ~((uint64_t) KGSL_MEMTYPE_MASK);
+ entry->memdesc.flags |= (uint64_t)((param->type <<
+ KGSL_MEMTYPE_SHIFT) & KGSL_MEMTYPE_MASK);
+ } else
+ ret = -EINVAL;
}
kgsl_mem_entry_put(entry);
- return 0;
+ return ret;
}
long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index faf38d1d2293..a486d9a86f9d 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2016, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -51,11 +51,12 @@
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, update multiple for MEMSTORE_SIZE
*/
-#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 2))
+#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 8))
#define KGSL_MEMSTORE_GLOBAL (0)
#define KGSL_PRIORITY_MAX_RB_LEVELS 4
#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
sizeof(struct kgsl_devmemstore) - 1 - KGSL_PRIORITY_MAX_RB_LEVELS)
+#define KGSL_MAX_CONTEXTS_PER_PROC 200
#define MEMSTORE_RB_OFFSET(rb, field) \
KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)
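
Growing KGSL_MEMSTORE_SIZE from two pages to eight quadruples the number of memstore
slots, which is what makes the new 200-contexts-per-process cap reachable at all. A
rough back-of-the-envelope check, assuming 4 KiB pages and, purely for illustration,
a 40-byte struct kgsl_devmemstore (the real structure size is defined elsewhere in
the tree):

    #include <stdio.h>

    int main(void)
    {
        int page_size = 4096;
        int memstore_size = page_size * 8;   /* was page_size * 2 */
        int devmemstore_size = 40;           /* assumed, not the real value */
        int rb_levels = 4;                   /* KGSL_PRIORITY_MAX_RB_LEVELS */

        int max = memstore_size / devmemstore_size - 1 - rb_levels;

        printf("contexts supported by the memstore: %d\n", max);  /* 814 here */
        return 0;
    }
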
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 64dd45a30612..57d4fe4d9120 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -171,6 +171,8 @@ struct kgsl_functable {
void (*gpu_model)(struct kgsl_device *device, char *str,
size_t bufsz);
void (*stop_fault_timer)(struct kgsl_device *device);
+ void (*dispatcher_halt)(struct kgsl_device *device);
+ void (*dispatcher_unhalt)(struct kgsl_device *device);
};
struct kgsl_ioctl {
@@ -417,6 +419,7 @@ struct kgsl_context {
* @syncsource_idr: sync sources created by this process
* @syncsource_lock: Spinlock to protect the syncsource idr
* @fd_count: Counter for the number of FDs for this process
+ * @ctxt_count: Count for the number of contexts for this process
*/
struct kgsl_process_private {
unsigned long priv;
@@ -436,6 +439,7 @@ struct kgsl_process_private {
struct idr syncsource_idr;
spinlock_t syncsource_lock;
int fd_count;
+ atomic_t ctxt_count;
};
/**
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ff6fae7c739b..b1b0b69d55ba 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2675,6 +2675,7 @@ _aware(struct kgsl_device *device)
break;
default:
status = -EINVAL;
+ return status;
}
if (status)
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
@@ -2781,8 +2782,9 @@ static int _suspend(struct kgsl_device *device)
{
int ret = 0;
- if ((KGSL_STATE_NONE == device->state) ||
- (KGSL_STATE_INIT == device->state))
+ if ((device->state == KGSL_STATE_NONE) ||
+ (device->state == KGSL_STATE_INIT) ||
+ (device->state == KGSL_STATE_SUSPEND))
return ret;
/* drain to prevent from more commands being submitted */
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 659ca36ce4c9..d1d399cce06a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1331,7 +1331,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
* of implement() working on 8 byte chunks
*/
- int len = hid_report_len(report) + 7;
+ u32 len = hid_report_len(report) + 7;
return kmalloc(len, flags);
}
@@ -1396,7 +1396,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
{
char *buf;
int ret;
- int len;
+ u32 len;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
@@ -1422,14 +1422,14 @@ out:
}
EXPORT_SYMBOL_GPL(__hid_request);
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt)
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
unsigned int a;
- int rsize, csize = size;
+ u32 rsize, csize = size;
u8 *cdata = data;
int ret = 0;
@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
*
* This is data entry for lower layers.
*/
-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
+int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
{
struct hid_report_enum *report_enum;
struct hid_driver *hdrv;
@@ -2309,7 +2309,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -2388,6 +2387,9 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
@@ -2579,6 +2581,17 @@ bool hid_ignore(struct hid_device *hdev)
strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
return true;
break;
+ case USB_VENDOR_ID_ELAN:
+ /*
+ * Many Elan devices have a product id of 0x0401 and are handled
+ * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev
+ * is not (and cannot be) handled by that driver ->
+ * Ignore all 0x0401 devs except for the ELAN0800 dev.
+ */
+ if (hdev->product == 0x0401 &&
+ strncmp(hdev->name, "ELAN0800", 8) != 0)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0cd4f7216239..5eea6fe0d7bd 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
{
struct input_dev *input = hidinput->input;
+ /*
+ * ELO devices have one Button usage in GenDesk field, which makes
+ * hid-input map it to BTN_LEFT; that confuses userspace, which then
+ * considers the device to be a mouse/touchpad instead of touchscreen.
+ */
+ clear_bit(BTN_LEFT, input->keybit);
set_bit(BTN_TOUCH, input->keybit);
set_bit(ABS_PRESSURE, input->absbit);
input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b554d17c9156..b316ab7e8996 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -570,6 +570,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043
#define USB_DEVICE_ID_LD_JWM 0x1080
#define USB_DEVICE_ID_LD_DMMP 0x1081
#define USB_DEVICE_ID_LD_UMIP 0x1090
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 2ba6bf69b7d0..8d74e691ac90 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1128,18 +1128,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
/*
* Ignore out-of-range values as per HID specification,
- * section 5.10 and 6.2.25.
+ * section 5.10 and 6.2.25, when NULL state bit is present.
+ * When it's not, clamp the value to match Microsoft's input
+ * driver as mentioned in "Required HID usages for digitizers":
+ * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp
*
* The logical_minimum < logical_maximum check is done so that we
* don't unintentionally discard values sent by devices which
* don't specify logical min and max.
*/
if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
- (field->logical_minimum < field->logical_maximum) &&
- (value < field->logical_minimum ||
- value > field->logical_maximum)) {
- dbg_hid("Ignoring out-of-range value %x\n", value);
- return;
+ (field->logical_minimum < field->logical_maximum)) {
+ if (field->flags & HID_MAIN_ITEM_NULL_STATE &&
+ (value < field->logical_minimum ||
+ value > field->logical_maximum)) {
+ dbg_hid("Ignoring out-of-range value %x\n", value);
+ return;
+ }
+ value = clamp(value,
+ field->logical_minimum,
+ field->logical_maximum);
}
/*
@@ -1250,7 +1258,8 @@ static void hidinput_led_worker(struct work_struct *work)
led_work);
struct hid_field *field;
struct hid_report *report;
- int len, ret;
+ int ret;
+ u32 len;
__u8 *buf;
field = hidinput_get_led_field(hid);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f62a9d6601cc..9de379c1b3fd 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -314,7 +314,8 @@ static struct attribute_group mt_attribute_group = {
static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hdev);
- int ret, size = hid_report_len(report);
+ int ret;
+ u32 size = hid_report_len(report);
u8 *buf;
/*
@@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
struct hid_report_enum *re;
struct mt_class *cls = &td->mtclass;
char *buf;
- int report_len;
+ u32 report_len;
if (td->inputmode < 0)
return;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 67cd059a8f46..41a4a2af9db1 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -110,8 +110,8 @@ struct rmi_data {
u8 *writeReport;
u8 *readReport;
- int input_report_size;
- int output_report_size;
+ u32 input_report_size;
+ u32 output_report_size;
unsigned long flags;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 9c2d7c23f296..c0c4df198725 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -197,6 +197,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
+ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+
dev = hidraw_table[minor]->hid;
if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 312aa1e33fb2..4c3ed078c6b9 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -137,10 +137,10 @@ struct i2c_hid {
* register of the HID
* descriptor. */
unsigned int bufsize; /* i2c buffer size */
- char *inbuf; /* Input buffer */
- char *rawbuf; /* Raw Input buffer */
- char *cmdbuf; /* Command buffer */
- char *argsbuf; /* Command arguments buffer */
+ u8 *inbuf; /* Input buffer */
+ u8 *rawbuf; /* Raw Input buffer */
+ u8 *cmdbuf; /* Command buffer */
+ u8 *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
@@ -387,7 +387,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
static void i2c_hid_get_input(struct i2c_hid *ihid)
{
- int ret, ret_size;
+ int ret;
+ u32 ret_size;
int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
if (size > ihid->bufsize)
@@ -412,7 +413,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
- if (ret_size > size) {
+ if ((ret_size > size) || (ret_size <= 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index a38af68cf326..0a0628d11c0b 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -976,7 +976,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
/* Pad to 32-bits - FIXME: Revisit*/
if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
- goto drop;
+ goto inc_dropped;
/*
* Modem sends Phonet messages over SSI with its own endianess...
@@ -1028,8 +1028,9 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
drop2:
hsi_free_msg(msg);
drop:
- dev->stats.tx_dropped++;
dev_kfree_skb(skb);
+inc_dropped:
+ dev->stats.tx_dropped++;
return 0;
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index d415a804fd26..9a8976a79b29 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -195,9 +195,7 @@ int hv_init(void)
{
int max_leaf;
union hv_x64_msr_hypercall_contents hypercall_msr;
- union hv_x64_msr_hypercall_contents tsc_msr;
void *virtaddr = NULL;
- void *va_tsc = NULL;
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
memset(hv_context.synic_message_page, 0,
@@ -243,6 +241,9 @@ int hv_init(void)
#ifdef CONFIG_X86_64
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+ union hv_x64_msr_hypercall_contents tsc_msr;
+ void *va_tsc;
+
va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
if (!va_tsc)
goto cleanup;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index b24f1d3045f0..ac63e562071f 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -94,18 +94,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -115,21 +117,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -168,12 +170,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * Calibration register is set to the best value, which eliminates
+ * truncation errors on calculating current register in hardware.
+ * According to datasheet (eq. 3) the best values are 2048 for
+ * ina226 and 4096 for ina219. They are hardcoded as calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -186,10 +192,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -267,15 +269,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -303,9 +305,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * In order to keep calibration register value fixed, the product
+ * of current_lsb and shunt_resistor should also be fixed and equal
+ * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order
+ * to keep the scale.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
+
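
With the calibration register pinned to the datasheet value, the current and power
LSBs now come entirely from the shunt resistance. A worked example using the ina226
constants from this patch (shunt_div = 400, power_lsb_factor = 25) and an assumed
10 milliohm shunt, i.e. rshunt = 10000 microohms:

    #include <stdio.h>

    /* Nearest-integer division for positive operands, like DIV_ROUND_CLOSEST(). */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        long shunt_div = 400;          /* ina226 */
        long power_lsb_factor = 25;    /* ina226 */
        long rshunt = 10000;           /* microohms; assumed shunt value */

        long dividend = DIV_ROUND_CLOSEST(1000000000L, shunt_div);  /* 2500000 */
        long current_lsb_uA = DIV_ROUND_CLOSEST(dividend, rshunt);  /* 250 */
        long power_lsb_uW = power_lsb_factor * current_lsb_uA;      /* 6250 */

        printf("current LSB: %ld uA, power LSB: %ld uW\n",
               current_lsb_uA, power_lsb_uW);
        return 0;
    }
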
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -315,18 +340,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -386,7 +402,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -431,6 +447,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[id->driver_data];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -441,10 +458,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -460,8 +474,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 188af4c89f40..18477dd1e243 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -95,8 +95,8 @@ static const struct coefficients adm1075_coefficients[] = {
[0] = { 27169, 0, -1 }, /* voltage */
[1] = { 806, 20475, -1 }, /* current, irange25 */
[2] = { 404, 20475, -1 }, /* current, irange50 */
- [3] = { 0, -1, 8549 }, /* power, irange25 */
- [4] = { 0, -1, 4279 }, /* power, irange50 */
+ [3] = { 8549, 0, -1 }, /* power, irange25 */
+ [4] = { 4279, 0, -1 }, /* power, irange50 */
};
static const struct coefficients adm1275_coefficients[] = {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3fd080b94069..0da9adc49574 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -45,8 +45,11 @@
#define TPIU_ITATBCTR0 0xef8
/** register definition **/
+/* FFSR - 0x300 */
+#define FFSR_FT_STOPPED BIT(1)
/* FFCR - 0x304 */
#define FFCR_FON_MAN BIT(6)
+#define FFCR_STOP_FI BIT(12)
/**
* @base: memory mapped base address for this component.
@@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- /* Clear formatter controle reg. */
- writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
+ /* Clear formatter and stop on flush */
+ writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
/* Generate manual flush */
- writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ /* Wait for flush to complete */
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ /* Wait for formatter to stop */
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 061ddadd1122..1b8199f1b25f 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -152,7 +152,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
continue;
/* The local out port number */
- pdata->outports[i] = endpoint.id;
+ pdata->outports[i] = endpoint.port;
/*
* Get a handle on the remote port and parent
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 4a9536d39b58..d72953f2df23 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -32,6 +32,8 @@
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/msm-sps.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
@@ -50,6 +52,9 @@ static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
static int i2c_msm_pm_resume(struct device *dev);
static void i2c_msm_pm_suspend(struct device *dev);
static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl);
+static struct pinctrl_state *
+ i2c_msm_rsrcs_gpio_get_state(struct i2c_msm_ctrl *ctrl,
+ const char *name);
static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
bool runtime_active);
@@ -1269,10 +1274,10 @@ static int i2c_msm_dma_xfer_process(struct i2c_msm_ctrl *ctrl)
tx->dir,
(SPS_IOVEC_FLAG_EOT |
SPS_IOVEC_FLAG_NWD));
- if (dma_desc_tx < 0) {
+ if (IS_ERR_OR_NULL(dma_desc_tx)) {
dev_err(ctrl->dev, "error dmaengine_prep_slave_sg tx:%ld\n",
PTR_ERR(dma_desc_tx));
- ret = PTR_ERR(dma_desc_tx);
+ ret = dma_desc_tx ? PTR_ERR(dma_desc_tx) : -ENOMEM;
goto dma_xfer_end;
}
@@ -1287,11 +1292,11 @@ static int i2c_msm_dma_xfer_process(struct i2c_msm_ctrl *ctrl)
sg_rx_itr - sg_rx, rx->dir,
(SPS_IOVEC_FLAG_EOT |
SPS_IOVEC_FLAG_NWD));
- if (dma_desc_rx < 0) {
+ if (IS_ERR_OR_NULL(dma_desc_rx)) {
dev_err(ctrl->dev,
"error dmaengine_prep_slave_sg rx:%ld\n",
PTR_ERR(dma_desc_rx));
- ret = PTR_ERR(dma_desc_rx);
+ ret = dma_desc_rx ? PTR_ERR(dma_desc_rx) : -ENOMEM;
goto dma_xfer_end;
}
@@ -1917,63 +1922,74 @@ static void i2c_msm_qup_init(struct i2c_msm_ctrl *ctrl)
"error on verifying HW support (I2C_MAST_GEN=0)\n");
}
-/*
- * qup_i2c_try_recover_bus_busy: issue QUP bus clear command
- */
-static int qup_i2c_try_recover_bus_busy(struct i2c_msm_ctrl *ctrl)
+static void qup_i2c_recover_bit_bang(struct i2c_msm_ctrl *ctrl)
{
- int ret;
- ulong min_sleep_usec;
+ int i, ret;
+ int gpio_clk;
+ int gpio_dat;
+ bool gpio_clk_status = false;
+ uint32_t status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
+ struct pinctrl_state *bitbang;
- /* call i2c_msm_qup_init() to set core in idle state */
- i2c_msm_qup_init(ctrl);
-
- /* must be in run state for bus clear */
- ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
- if (ret < 0) {
- dev_err(ctrl->dev, "error: bus clear fail to set run state\n");
- return ret;
+ dev_info(ctrl->dev, "Executing bus recovery procedure (9 clk pulse)\n");
+ disable_irq(ctrl->rsrcs.irq);
+ if (!(status & (I2C_STATUS_BUS_ACTIVE)) ||
+ (status & (I2C_STATUS_BUS_MASTER))) {
+ dev_warn(ctrl->dev, "unexpected i2c recovery call:0x%x\n",
+ status);
+ goto recovery_exit;
}
- /*
- * call i2c_msm_qup_xfer_init_run_state() to set clock dividers.
- * the dividers are necessary for bus clear.
- */
- i2c_msm_qup_xfer_init_run_state(ctrl);
+ gpio_clk = of_get_named_gpio(ctrl->adapter.dev.of_node, "qcom,i2c-clk",
+ 0);
+ gpio_dat = of_get_named_gpio(ctrl->adapter.dev.of_node, "qcom,i2c-dat",
+ 0);
- writel_relaxed(0x1, ctrl->rsrcs.base + QUP_I2C_MASTER_BUS_CLR);
+ if (gpio_clk < 0 || gpio_dat < 0) {
+ dev_warn(ctrl->dev, "SW bitbang err: i2c gpios not known\n");
+ goto recovery_exit;
+ }
- /*
- * wait for recovery (9 clock pulse cycles) to complete.
- * min_time = 9 clock *10 (1000% margin)
- * max_time = 10* min_time
- */
- min_sleep_usec =
- max_t(ulong, (9 * 10 * USEC_PER_SEC) / ctrl->rsrcs.clk_freq_out, 100);
+ bitbang = i2c_msm_rsrcs_gpio_get_state(ctrl, "i2c_bitbang");
+ if (bitbang)
+ ret = pinctrl_select_state(ctrl->rsrcs.pinctrl, bitbang);
+ if (!bitbang || ret) {
+ dev_err(ctrl->dev, "GPIO pins have no bitbang setting\n");
+ goto recovery_exit;
+ }
+ for (i = 0; i < 10; i++) {
+ if (gpio_get_value(gpio_dat) && gpio_clk_status)
+ break;
+ gpio_direction_output(gpio_clk, 0);
+ udelay(5);
+ gpio_direction_output(gpio_dat, 0);
+ udelay(5);
+ gpio_direction_input(gpio_clk);
+ udelay(5);
+ if (!gpio_get_value(gpio_clk))
+ udelay(20);
+ if (!gpio_get_value(gpio_clk))
+ usleep_range(10000, 10001);
+ gpio_clk_status = gpio_get_value(gpio_clk);
+ gpio_direction_input(gpio_dat);
+ udelay(5);
+ }
- usleep_range(min_sleep_usec, min_sleep_usec * 10);
- return ret;
-}
+ i2c_msm_pm_pinctrl_state(ctrl, true);
+ udelay(10);
-static int qup_i2c_recover_bus_busy(struct i2c_msm_ctrl *ctrl)
-{
- u32 bus_clr, bus_active, status;
- int retry = 0;
- dev_info(ctrl->dev, "Executing bus recovery procedure (9 clk pulse)\n");
+ status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
+ if (!(status & I2C_STATUS_BUS_ACTIVE)) {
+ dev_info(ctrl->dev,
+ "Bus busy cleared after %d clock cycles, status %x\n",
+ i, status);
+ goto recovery_exit;
+ }
- do {
- qup_i2c_try_recover_bus_busy(ctrl);
- bus_clr = readl_relaxed(ctrl->rsrcs.base +
- QUP_I2C_MASTER_BUS_CLR);
- status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
- bus_active = status & I2C_STATUS_BUS_ACTIVE;
- if (++retry >= I2C_QUP_MAX_BUS_RECOVERY_RETRY)
- break;
- } while (bus_clr || bus_active);
+ dev_warn(ctrl->dev, "Bus still busy, status %x\n", status);
- dev_info(ctrl->dev, "Bus recovery %s after %d retries\n",
- (bus_clr || bus_active) ? "fail" : "success", retry);
- return 0;
+recovery_exit:
+ enable_irq(ctrl->rsrcs.irq);
}
static int i2c_msm_qup_post_xfer(struct i2c_msm_ctrl *ctrl, int err)
@@ -1984,7 +2000,7 @@ static int i2c_msm_qup_post_xfer(struct i2c_msm_ctrl *ctrl, int err)
(ctrl->xfer.err == I2C_MSM_ERR_BUS_ERR) ||
(ctrl->xfer.err == I2C_MSM_ERR_TIMEOUT)) {
if (i2c_msm_qup_slv_holds_bus(ctrl))
- qup_i2c_recover_bus_busy(ctrl);
+ qup_i2c_recover_bit_bang(ctrl);
/* do not generalize error to EIO if its already set */
if (!err)
@@ -2832,8 +2848,8 @@ static void i2c_msm_pm_rt_init(struct device *dev) {}
static const struct dev_pm_ops i2c_msm_pm_ops = {
#ifdef CONFIG_PM_SLEEP
- .suspend_noirq = i2c_msm_pm_sys_suspend_noirq,
- .resume_noirq = i2c_msm_pm_sys_resume_noirq,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(i2c_msm_pm_sys_suspend_noirq,
+ i2c_msm_pm_sys_resume_noirq)
#endif
SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend,
i2c_msm_pm_rt_resume,
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index dfc98df7b1b6..7aa7b9cb6203 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -18,6 +18,9 @@
#define ACPI_SMBUS_HC_CLASS "smbus"
#define ACPI_SMBUS_HC_DEVICE_NAME "cmi"
+/* SMBUS HID definition as supported by Microsoft Windows */
+#define ACPI_SMBUS_MS_HID "SMB0001"
+
ACPI_MODULE_NAME("smbus_cmi");
struct smbus_methods_t {
@@ -51,6 +54,7 @@ static const struct smbus_methods_t ibm_smbus_methods = {
static const struct acpi_device_id acpi_smbus_cmi_ids[] = {
{"SMBUS01", (kernel_ulong_t)&smbus_methods},
{ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods},
+ {ACPI_SMBUS_MS_HID, (kernel_ulong_t)&smbus_methods},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids);
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 90e322959303..42c25aed671d 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
*/
-int __init
-i2c_register_board_info(int busnum,
- struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
int status;
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index 4732dfc15447..331adc509f3a 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -17,6 +17,7 @@ config I7300_IDLE_IOAT_CHANNEL
config I7300_IDLE
tristate "Intel chipset idle memory power saving driver"
+ depends on PCI
select I7300_IDLE_IOAT_CHANNEL
help
Enable memory power savings when idle with certain Intel server
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 197a08b4e2f3..b4136d3bf6b7 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -628,6 +628,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata =
+ (struct st_sensors_platform_data *)adata->dev->platform_data;
int irq = adata->get_irq_data_ready(indio_dev);
int err;
@@ -652,11 +654,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
&adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
- if (!adata->dev->platform_data)
- adata->dev->platform_data =
- (struct st_sensors_platform_data *)&default_accel_pdata;
+ if (!pdata)
+ pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
- err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
return err;
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index f684fe31f832..64799ad7ebad 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -44,7 +44,7 @@ struct axp288_adc_info {
struct regmap *regmap;
};
-static const struct iio_chan_spec const axp288_adc_channels[] = {
+static const struct iio_chan_spec axp288_adc_channels[] = {
{
.indexed = 1,
.type = IIO_TEMP,
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index c73c6c62a6ac..7401f102dff4 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -121,10 +121,21 @@ static int hi8435_write_event_config(struct iio_dev *idev,
enum iio_event_direction dir, int state)
{
struct hi8435_priv *priv = iio_priv(idev);
+ int ret;
+ u32 tmp;
+
+ if (state) {
+ ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp);
+ if (ret < 0)
+ return ret;
+ if (tmp & BIT(chan->channel))
+ priv->event_prev_val |= BIT(chan->channel);
+ else
+ priv->event_prev_val &= ~BIT(chan->channel);
- priv->event_scan_mask &= ~BIT(chan->channel);
- if (state)
priv->event_scan_mask |= BIT(chan->channel);
+ } else
+ priv->event_scan_mask &= ~BIT(chan->channel);
return 0;
}
@@ -442,13 +453,15 @@ static int hi8435_probe(struct spi_device *spi)
priv->spi = spi;
reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW);
- if (IS_ERR(reset_gpio)) {
- /* chip s/w reset if h/w reset failed */
+ if (!IS_ERR(reset_gpio)) {
+ /* need >=100ns low pulse to reset chip */
+ gpiod_set_raw_value_cansleep(reset_gpio, 0);
+ udelay(1);
+ gpiod_set_raw_value_cansleep(reset_gpio, 1);
+ } else {
+ /* s/w reset chip if h/w reset is not available */
hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST);
hi8435_writeb(priv, HI8435_CTRL_REG, 0);
- } else {
- udelay(5);
- gpiod_set_value(reset_gpio, 1);
}
spi_set_drvdata(spi, idev);
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index f53e9a803a0e..93b99bd93738 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -47,6 +47,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL)
return -ENOMEM;
+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ iio_trigger_set_drvdata(adis->trig, adis);
+
ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING,
@@ -55,9 +59,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (ret)
goto error_free_trig;
- adis->trig->dev.parent = &adis->spi->dev;
- adis->trig->ops = &adis_trigger_ops;
- iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);
indio_dev->trig = iio_trigger_get(adis->trig);
diff --git a/drivers/iio/imu/inv_mpu/Kconfig b/drivers/iio/imu/inv_mpu/Kconfig
new file mode 100644
index 000000000000..7505454f8763
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/Kconfig
@@ -0,0 +1,63 @@
+#
+# inv-mpu-iio driver for Invensense MPU devices
+#
+
+config INV_MPU_IIO
+ tristate
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ select IIO_TRIGGER
+ select CRC32
+
+choice
+ prompt "Chip name"
+ depends on INV_MPU_IIO
+
+config INV_MPU_IIO_ICM20648
+ bool "ICM20648/ICM20948"
+ help
+ Select this if you are using an ICM20648/ICM20948 chip.
+
+config INV_MPU_IIO_ICM20608D
+ bool "ICM20608D/ICM20609/ICM20689"
+ help
+ Select this if you are using an ICM20608D/ICM20609/ICM20689 chip.
+
+config INV_MPU_IIO_ICM20602
+ bool "ICM20602"
+ help
+ Select this if you are using an ICM20602 chip.
+
+config INV_MPU_IIO_ICM20690
+ bool "ICM20690"
+ help
+ Select this if you are using an ICM20690 chip.
+
+config INV_MPU_IIO_IAM20680
+ bool "IAM20680"
+ help
+ Select this if you are using an IAM20680 chip.
+
+endchoice
+
+config INV_MPU_IIO_I2C
+ tristate "Invensense ICM20xxx devices (I2C)"
+ depends on I2C && !INV_MPU6050_IIO
+ select INV_MPU_IIO
+ default n
+ help
+ This driver supports Invensense ICM20xxx devices over I2C.
+ This driver can be built as a module. The module will be called
+ inv-mpu-iio-i2c.
+
+config INV_MPU_IIO_SPI
+ tristate "Invensense ICM20xxx devices (SPI)"
+ depends on SPI_MASTER && !INV_MPU6050_IIO
+ select INV_MPU_IIO
+ default n
+ help
+ This driver supports Invensense ICM20xxx devices over SPI.
+ This driver can be built as a module. The module will be called
+ inv-mpu-iio-spi.
+
+source "drivers/iio/imu/inv_mpu/inv_test/Kconfig"
diff --git a/drivers/iio/imu/inv_mpu/Makefile b/drivers/iio/imu/inv_mpu/Makefile
new file mode 100644
index 000000000000..dfc4c257ef73
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/Makefile
@@ -0,0 +1,61 @@
+#
+# Makefile for Invensense inv-mpu-iio device.
+#
+
+obj-$(CONFIG_INV_MPU_IIO) += inv-mpu-iio.o
+
+inv-mpu-iio-objs += inv_mpu_common.o
+inv-mpu-iio-objs += inv_mpu_ring.o
+inv-mpu-iio-objs += inv_mpu_timestamp.o
+inv-mpu-iio-objs += inv_mpu_dts.o
+
+# chip support
+ifeq ($(CONFIG_INV_MPU_IIO_ICM20648), y)
+inv-mpu-iio-objs += icm20648/inv_mpu_init.o
+inv-mpu-iio-objs += icm20648/inv_mpu_core.o
+inv-mpu-iio-objs += icm20648/inv_mpu_parsing.o
+inv-mpu-iio-objs += icm20648/inv_mpu_setup.o
+inv-mpu-iio-objs += icm20648/inv_mpu_dmp_fifo.o
+inv-mpu-iio-objs += icm20648/inv_slave_compass.o
+inv-mpu-iio-objs += icm20648/inv_slave_pressure.o
+inv-mpu-iio-objs += icm20648/inv_slave_als.o
+inv-mpu-iio-objs += icm20648/inv_mpu_load_dmp.o
+inv-mpu-iio-objs += icm20648/inv_mpu_selftest.o
+inv-mpu-iio-objs += dmp_support/inv_mpu_misc.o
+else ifeq ($(CONFIG_INV_MPU_IIO_ICM20690), y)
+inv-mpu-iio-objs += icm20690/inv_mpu_init_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_core_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_parsing_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_setup_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_selftest_20690.o
+inv-mpu-iio-objs += icm20690/inv_slave_compass.o
+else ifeq ($(CONFIG_INV_MPU_IIO_ICM20602), y)
+inv-mpu-iio-objs += icm20602/inv_mpu_init_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_core_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_parsing_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_setup_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_selftest_20602.o
+else ifeq ($(CONFIG_INV_MPU_IIO_ICM20608D), y)
+inv-mpu-iio-objs += icm20608d/inv_mpu_init_20608.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_core_20608.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_parsing_20608.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_setup_20608D.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_dmp_fifo.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_load_dmp.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_selftest_20608.o
+inv-mpu-iio-objs += dmp_support/inv_mpu_misc.o
+else ifeq ($(CONFIG_INV_MPU_IIO_IAM20680), y)
+inv-mpu-iio-objs += iam20680/inv_mpu_init_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_core_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_parsing_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_setup_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_selftest_20680.o
+endif
+
+# Bus support
+obj-$(CONFIG_INV_MPU_IIO_I2C) += inv-mpu-iio-i2c.o
+inv-mpu-iio-i2c-objs := inv_mpu_i2c.o
+obj-$(CONFIG_INV_MPU_IIO_SPI) += inv-mpu-iio-spi.o
+inv-mpu-iio-spi-objs := inv_mpu_spi.o
+
+obj-y += inv_test/
diff --git a/drivers/iio/imu/inv_mpu/README b/drivers/iio/imu/inv_mpu/README
new file mode 100644
index 000000000000..47ff5029ee6e
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/README
@@ -0,0 +1,117 @@
+Kernel driver inv-mpu-iio
+Author: InvenSense, Inc.
+
+
+Table of Contents
+=================
+- Description
+- Integrating the Driver in the Linux Kernel
+- Dts file
+- Communicating with the Driver in Userspace
+
+
+Description
+===========
+This document describes how to install the Invensense device driver into a
+Linux kernel. The supported chips are listed in Kconfig, and the user selects an
+appropriate one from, e.g., menuconfig.
+
+
+Integrating the Driver in the Linux Kernel
+==========================================
+Please add the files as follows (kernel 3.10):
+- Copy mpu.h to <kernel_root>/include/linux/iio/imu/
+- Copy inv_mpu folder under <kernel_root>/drivers/iio/imu/
+
+In order to see the driver in menuconfig when building the kernel, please
+make modifications as shown below:
+
+ add "source "drivers/iio/imu/inv_mpu/Kconfig""
+ in <kernel_root>/drivers/iio/imu/Kconfig
+
+ add "obj-y += inv_mpu/"
+ in <kernel_root>/drivers/iio/imu/Makefile
+
+
+
+Dts file
+========
+In order to recognize the Invensense device on the I2C/SPI bus, the dts (or dtsi)
+file must be modified.
+
+Example)
+ICM20648 + AK09911/BMP280/APDS9930 on AUX I2C
+
+ i2c@f9968000 {
+ /* Invensense */
+ mpu6515_acc@68 {
+ compatible = "inven,icm20648";
+ reg = <0x68>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <73 0x2>;
+ inven,vdd_ana-supply = <&pm8941_l17>;
+ inven,vcc_i2c-supply = <&pm8941_lvs1>;
+ inven,gpio_int1 = <&msmgpio 73 0x00>;
+ fs_range = <0x00>;
+ /* mount matrix */
+ axis_map_x = <1>;
+ axis_map_y = <0>;
+ axis_map_z = <2>;
+ negate_x = <0>;
+ negate_y = <0>;
+ negate_z = <1>;
+ poll_interval = <200>;
+ min_interval = <5>;
+ inven,secondary_reg = <0x0c>;
+ /* If no compass sensor,
+ * replace "compass" with "none"
+ */
+ inven,secondary_type = "compass";
+ inven,secondary_name = "ak09911";
+ inven,secondary_axis_map_x = <1>;
+ inven,secondary_axis_map_y = <0>;
+ inven,secondary_axis_map_z = <2>;
+ inven,secondary_negate_x = <1>;
+ inven,secondary_negate_y = <1>;
+ inven,secondary_negate_z = <1>;
+ /* If no pressure sensor,
+ * replace "pressure" with "none"
+ */
+ inven,aux_type = "pressure";
+ inven,aux_name = "bmp280";
+ inven,aux_reg = <0x76>;
+ /* If no ALS sensor
+ * replace "als" with "none"
+ */
+ inven,read_only_slave_type = "als";
+ inven,read_only_slave_name = "apds9930";
+ inven,read_only_slave_reg = <0x39>;
+ };
+ };
+
+
+Communicating with the Driver in Userspace
+==========================================
+The driver generates several files in sysfs upon installation.
+These files are used to communicate with the driver. The files can be found at:
+
+(I2C) /sys/devices/*.i2c/i2c-*/*-*/iio:device*
+(SPI) /sys/devices/*.spi/spi_master/spi*/spi*.*/iio:device*
+
+Group and Owner for all entries should be updated to system/system at
+boot time to allow userspace to access them properly.
+
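
A minimal userspace sketch of reading one of these attributes, assuming a device
enumerated as iio:device0; the path and attribute are illustrative, and the exact
attribute set depends on the chip and configuration chosen above:

    #include <stdio.h>

    int main(void)
    {
        /* Path is illustrative; adjust to the bus and device index in use. */
        const char *path = "/sys/bus/iio/devices/iio:device0/name";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("device name: %s", buf);
        fclose(f);
        return 0;
    }
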
+
+License
+=======
+Copyright (C) 2018 InvenSense, Inc.
+
+This software is licensed under the terms of the GNU General Public
+License version 2, as published by the Free Software Foundation, and
+may be copied, distributed, and modified under those terms.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_core_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_core_20680.c
new file mode 100644
index 000000000000..b429f57be5ca
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_core_20680.c
@@ -0,0 +1,1072 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+
+#include "../inv_mpu_iio.h"
+
+static const struct inv_hw_s hw_info[INV_NUM_PARTS] = {
+ {128, "ICM20608D"},
+ {128, "ICM20690"},
+ {128, "ICM20602"},
+ {128, "IAM20680"},
+};
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static char debug_reg_addr = 0x6;
+#endif
+
+const char sensor_l_info[][30] = {
+ "SENSOR_L_ACCEL",
+ "SENSOR_L_GYRO",
+ "SENSOR_L_MAG",
+ "SENSOR_L_ALS",
+ "SENSOR_L_SIXQ",
+ "SENSOR_L_THREEQ",
+ "SENSOR_L_NINEQ",
+ "SENSOR_L_PEDQ",
+ "SENSOR_L_GEOMAG",
+ "SENSOR_L_PRESSURE",
+ "SENSOR_L_GYRO_CAL",
+ "SENSOR_L_MAG_CAL",
+ "SENSOR_L_EIS_GYRO",
+ "SENSOR_L_ACCEL_WAKE",
+ "SENSOR_L_GYRO_WAKE",
+ "SENSOR_L_MAG_WAKE",
+ "SENSOR_L_ALS_WAKE",
+ "SENSOR_L_SIXQ_WAKE",
+ "SENSOR_L_NINEQ_WAKE",
+ "SENSOR_L_PEDQ_WAKE",
+ "SENSOR_L_GEOMAG_WAKE",
+ "SENSOR_L_PRESSURE_WAKE",
+ "SENSOR_L_GYRO_CAL_WAKE",
+ "SENSOR_L_MAG_CAL_WAKE",
+ "SENSOR_L_NUM_MAX",
+};
+
+static int inv_set_accel_bias_reg(struct inv_mpu_state *st,
+ int accel_bias, int axis)
+{
+ int accel_reg_bias;
+ u8 addr;
+ u8 d[2];
+ int result = 0;
+
+ switch (axis) {
+ case 0:
+ /* X */
+ addr = REG_XA_OFFS_H;
+ break;
+ case 1:
+ /* Y */
+ addr = REG_YA_OFFS_H;
+ break;
+ case 2:
+		/* Z */
+ addr = REG_ZA_OFFS_H;
+ break;
+ default:
+ result = -EINVAL;
+ goto accel_bias_set_err;
+ }
+
+ result = inv_plat_read(st, addr, 2, d);
+ if (result)
+ goto accel_bias_set_err;
+ accel_reg_bias = ((int)d[0] << 8) | d[1];
+
+ /* accel_bias is 2g scaled by 1<<16.
+ * Convert to 16g, and mask bit0 */
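+	/* Illustrative example: an input of 1 g (16384 << 16 in 2 g units)
+	 * is subtracted below as 16384 / 8 = 2048 offset-register counts.
+	 */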
+ accel_reg_bias -= ((accel_bias / 8 / 65536) & ~1);
+
+ d[0] = (accel_reg_bias >> 8) & 0xff;
+ d[1] = (accel_reg_bias) & 0xff;
+ result = inv_plat_single_write(st, addr, d[0]);
+ if (result)
+ goto accel_bias_set_err;
+ result = inv_plat_single_write(st, addr + 1, d[1]);
+ if (result)
+ goto accel_bias_set_err;
+
+accel_bias_set_err:
+ return result;
+}
+
+static int inv_set_gyro_bias_reg(struct inv_mpu_state *st,
+ const int gyro_bias, int axis)
+{
+ int gyro_reg_bias;
+ u8 addr;
+ u8 d[2];
+ int result = 0;
+
+ switch (axis) {
+ case 0:
+ /* X */
+ addr = REG_XG_OFFS_USR_H;
+ break;
+ case 1:
+ /* Y */
+ addr = REG_YG_OFFS_USR_H;
+ break;
+ case 2:
+ /* Z */
+ addr = REG_ZG_OFFS_USR_H;
+ break;
+ default:
+ result = -EINVAL;
+ goto gyro_bias_set_err;
+ }
+
+ /* gyro_bias is 2000dps scaled by 1<<16.
+ * Convert to 1000dps */
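+	/* The offset register uses 1000 dps sensitivity (twice the counts
+	 * per dps of the 2000 dps input), and the sign is flipped so that
+	 * the applied offset cancels the measured bias.
+	 */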
+ gyro_reg_bias = (-gyro_bias * 2 / 65536);
+
+ d[0] = (gyro_reg_bias >> 8) & 0xff;
+ d[1] = (gyro_reg_bias) & 0xff;
+ result = inv_plat_single_write(st, addr, d[0]);
+ if (result)
+ goto gyro_bias_set_err;
+ result = inv_plat_single_write(st, addr + 1, d[1]);
+ if (result)
+ goto gyro_bias_set_err;
+
+gyro_bias_set_err:
+ return result;
+}
+
+static int _bias_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int result, data;
+
+ result = inv_switch_power_in_lp(st, true);
+ if (result)
+ return result;
+
+ result = kstrtoint(buf, 10, &data);
+ if (result)
+ goto bias_store_fail;
+ switch (this_attr->address) {
+ case ATTR_ACCEL_X_OFFSET:
+ result = inv_set_accel_bias_reg(st, data, 0);
+ if (result)
+ goto bias_store_fail;
+ st->input_accel_bias[0] = data;
+ break;
+ case ATTR_ACCEL_Y_OFFSET:
+ result = inv_set_accel_bias_reg(st, data, 1);
+ if (result)
+ goto bias_store_fail;
+ st->input_accel_bias[1] = data;
+ break;
+ case ATTR_ACCEL_Z_OFFSET:
+ result = inv_set_accel_bias_reg(st, data, 2);
+ if (result)
+ goto bias_store_fail;
+ st->input_accel_bias[2] = data;
+ break;
+ case ATTR_GYRO_X_OFFSET:
+ result = inv_set_gyro_bias_reg(st, data, 0);
+ if (result)
+ goto bias_store_fail;
+ st->input_gyro_bias[0] = data;
+ break;
+ case ATTR_GYRO_Y_OFFSET:
+ result = inv_set_gyro_bias_reg(st, data, 1);
+ if (result)
+ goto bias_store_fail;
+ st->input_gyro_bias[1] = data;
+ break;
+ case ATTR_GYRO_Z_OFFSET:
+ result = inv_set_gyro_bias_reg(st, data, 2);
+ if (result)
+ goto bias_store_fail;
+ st->input_gyro_bias[2] = data;
+ break;
+ default:
+ break;
+ }
+
+bias_store_fail:
+ if (result)
+ return result;
+ result = inv_switch_power_in_lp(st, false);
+ if (result)
+ return result;
+
+ return count;
+}
+
+static ssize_t inv_bias_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int result;
+
+ mutex_lock(&indio_dev->mlock);
+ result = _bias_store(dev, attr, buf, count);
+ mutex_unlock(&indio_dev->mlock);
+
+ return result;
+}
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static ssize_t inv_debug_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int result, data;
+
+ result = kstrtoint(buf, 10, &data);
+ if (result)
+ return result;
+ switch (this_attr->address) {
+ case ATTR_DMP_LP_EN_OFF:
+ st->chip_config.lp_en_mode_off = !!data;
+ inv_switch_power_in_lp(st, !!data);
+ break;
+ case ATTR_DMP_CLK_SEL:
+ st->chip_config.clk_sel = !!data;
+ inv_switch_power_in_lp(st, !!data);
+ break;
+ case ATTR_DEBUG_REG_ADDR:
+ debug_reg_addr = data;
+ break;
+ case ATTR_DEBUG_REG_WRITE:
+ inv_plat_single_write(st, debug_reg_addr, data);
+ break;
+ }
+ return count;
+}
+#endif
+
+static int _misc_attr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int result, data;
+
+ result = inv_switch_power_in_lp(st, true);
+ if (result)
+ return result;
+ result = kstrtoint(buf, 10, &data);
+ if (result)
+ return result;
+ switch (this_attr->address) {
+ case ATTR_GYRO_SCALE:
+ if (data > 3)
+ return -EINVAL;
+ st->chip_config.fsr = data;
+ result = inv_set_gyro_sf(st);
+ return result;
+ case ATTR_ACCEL_SCALE:
+ if (data > 3)
+ return -EINVAL;
+ st->chip_config.accel_fs = data;
+ result = inv_set_accel_sf(st);
+ return result;
+ default:
+ return -EINVAL;
+ }
+ st->trigger_state = MISC_TRIGGER;
+ result = set_inv_enable(indio_dev);
+
+ return result;
+}
+
+/*
+ * inv_misc_attr_store() - locked wrapper around _misc_attr_store().
+ */
+static ssize_t inv_misc_attr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int result;
+
+ mutex_lock(&indio_dev->mlock);
+ result = _misc_attr_store(dev, attr, buf, count);
+ mutex_unlock(&indio_dev->mlock);
+ if (result)
+ return result;
+
+ return count;
+}
+
+static ssize_t inv_sensor_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return snprintf(buf, MAX_WR_SZ, "%d\n",
+ st->sensor_l[this_attr->address].rate);
+}
+
+static ssize_t inv_sensor_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int data, rate, ind;
+ int result;
+
+ result = kstrtoint(buf, 10, &data);
+ if (result)
+ return -EINVAL;
+ if (data <= 0) {
+ pr_err("sensor_rate_store: invalid data=%d\n", data);
+ return -EINVAL;
+ }
+ ind = this_attr->address;
+ rate = inv_rate_convert(st, ind, data);
+
+ pr_debug("sensor [%s] requested rate %d input [%d]\n",
+ sensor_l_info[ind], rate, data);
+
+ if (rate == st->sensor_l[ind].rate)
+ return count;
+ mutex_lock(&indio_dev->mlock);
+ st->sensor_l[ind].rate = rate;
+ st->trigger_state = DATA_TRIGGER;
+ inv_check_sensor_on(st);
+ result = set_inv_enable(indio_dev);
+ pr_debug("%s rate %d div %d\n", sensor_l_info[ind],
+ st->sensor_l[ind].rate, st->sensor_l[ind].div);
+ mutex_unlock(&indio_dev->mlock);
+
+ return count;
+}
+
+static ssize_t inv_sensor_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->sensor_l[this_attr->address].on);
+}
+
+static ssize_t inv_sensor_on_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int data, on, ind;
+ int result;
+
+ result = kstrtoint(buf, 10, &data);
+ if (result)
+ return -EINVAL;
+ if (data < 0) {
+ pr_err("sensor_on_store: invalid data=%d\n", data);
+ return -EINVAL;
+ }
+ ind = this_attr->address;
+ on = !!data;
+
+ pr_debug("sensor [%s] requested %s, input [%d]\n",
+ sensor_l_info[ind], (on == 1) ? "On" : "Off", data);
+
+ if (on == st->sensor_l[ind].on) {
+ pr_debug("sensor [%s] is already %s, input [%d]\n",
+ sensor_l_info[ind], (on == 1) ? "On" : "Off", data);
+ return count;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ st->sensor_l[ind].on = on;
+ st->trigger_state = RATE_TRIGGER;
+ inv_check_sensor_on(st);
+ result = set_inv_enable(indio_dev);
+ mutex_unlock(&indio_dev->mlock);
+ if (result)
+ return result;
+
+ pr_debug("Sensor [%s] is %s by sysfs\n",
+ sensor_l_info[ind], (on == 1) ? "On" : "Off");
+ return count;
+}
+
+static int inv_check_l_step(struct inv_mpu_state *st)
+{
+ if (st->step_counter_l_on || st->step_counter_wake_l_on)
+ st->ped.on = true;
+ else
+ st->ped.on = false;
+
+ return 0;
+}
+
+static int _basic_attr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int data;
+ int result;
+ u32 power_on_data;
+
+ result = kstrtoint(buf, 10, &data);
+ if (result || (data < 0))
+ return -EINVAL;
+
+ switch (this_attr->address) {
+ case ATTR_DMP_PED_ON:
+ if ((!!data) == st->ped.on)
+ return count;
+ st->ped.on = !!data;
+ break;
+ case ATTR_DMP_TILT_ENABLE:
+ if ((!!data) == st->chip_config.tilt_enable)
+ return count;
+ st->chip_config.tilt_enable = !!data;
+		pr_info("Tilt %s\n",
+ st->chip_config.tilt_enable ==
+ 1 ? "Enabled" : "Disabled");
+ break;
+ case ATTR_DMP_PICK_UP_ENABLE:
+ if ((!!data) == st->chip_config.pick_up_enable) {
+ pr_info("Pick_up enable already %s\n",
+ st->chip_config.pick_up_enable ==
+ 1 ? "Enabled" : "Disabled");
+ return count;
+ }
+ st->chip_config.pick_up_enable = !!data;
+ pr_info("Pick up %s\n",
+ st->chip_config.pick_up_enable ==
+ 1 ? "Enable" : "Disable");
+ break;
+ case ATTR_IN_POWER_ON:
+ {
+ u8 p0[2];
+ u8 p1[2];
+
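+		/* Split the written value into two 2-byte buffers, run a
+		 * raw bus transaction (transmit from p0, receive into p1),
+		 * then repack both buffers into power_on_data so the result
+		 * can be read back through the show path.
+		 */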
+ power_on_data = (u32)data;
+ p0[0] = (power_on_data & 0xff);
+ p0[1] = ((power_on_data >> 8) & 0xff);
+ p1[0] = ((power_on_data >> 16) & 0xff);
+ p1[1] = ((power_on_data >> 24) & 0xff);
+
+ if (st->bus_type == BUS_SPI) {
+ struct spi_transfer power_on;
+ struct spi_message msg;
+
+ memset(&power_on, 0, sizeof(struct spi_transfer));
+
+ power_on.bits_per_word = 8;
+ power_on.len = 2;
+
+ power_on.tx_buf = p0;
+ power_on.rx_buf = p1;
+ spi_message_init(&msg);
+ spi_message_add_tail(&power_on, &msg);
+ spi_sync(to_spi_device(st->dev), &msg);
+
+ } else if (st->bus_type == BUS_I2C) {
+ struct i2c_msg msgs[2];
+
+ p0[0] &= 0x7f;
+
+ msgs[0].addr = st->i2c_addr;
+ msgs[0].flags = 0; /* write */
+ msgs[0].buf = &p0[0];
+ msgs[0].len = 1;
+
+ msgs[1].addr = st->i2c_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = &p1[1];
+ msgs[1].len = 1;
+
+ result = i2c_transfer(st->sl_handle, msgs, 2);
+ if (result < 2)
+ return -EIO;
+ }
+ st->power_on_data = ((p0[0] << 24) | (p0[1] << 16) |
+ (p1[0] << 8) | p1[1]);
+ return count;
+ }
+ case ATTR_DMP_EIS_ENABLE:
+ if ((!!data) == st->chip_config.eis_enable)
+ return count;
+ st->chip_config.eis_enable = !!data;
+ pr_info("Eis %s\n",
+ st->chip_config.eis_enable == 1 ? "Enable" : "Disable");
+ break;
+ case ATTR_DMP_STEP_DETECTOR_ON:
+ st->step_detector_l_on = !!data;
+ break;
+ case ATTR_DMP_STEP_DETECTOR_WAKE_ON:
+ st->step_detector_wake_l_on = !!data;
+ break;
+ case ATTR_DMP_STEP_COUNTER_ON:
+ st->step_counter_l_on = !!data;
+ break;
+ case ATTR_DMP_STEP_COUNTER_WAKE_ON:
+ st->step_counter_wake_l_on = !!data;
+ break;
+ case ATTR_DMP_BATCHMODE_TIMEOUT:
+ if (data == st->batch.timeout)
+ return count;
+ st->batch.timeout = data;
+ break;
+ default:
+ return -EINVAL;
+	}
+ inv_check_l_step(st);
+ inv_check_sensor_on(st);
+
+ st->trigger_state = EVENT_TRIGGER;
+ result = set_inv_enable(indio_dev);
+ if (result)
+ return result;
+
+ return count;
+}
+
+/*
+ * inv_basic_attr_store()
+ */
+static ssize_t inv_basic_attr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int result;
+
+ mutex_lock(&indio_dev->mlock);
+ result = _basic_attr_store(dev, attr, buf, count);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return result;
+}
+
+/*
+ * inv_attr_show()
+ */
+static ssize_t inv_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s8 *m;
+
+ switch (this_attr->address) {
+ case ATTR_GYRO_SCALE:
+ {
+ const s16 gyro_scale[] = { 250, 500, 1000, 2000 };
+
+ return snprintf(buf, MAX_WR_SZ, "%d\n",
+ gyro_scale[st->chip_config.fsr]);
+ }
+ case ATTR_ACCEL_SCALE:
+ {
+ const s16 accel_scale[] = { 2, 4, 8, 16 };
+ return snprintf(buf, MAX_WR_SZ, "%d\n",
+ accel_scale[st->chip_config.accel_fs]);
+ }
+ case ATTR_GYRO_ENABLE:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->chip_config.gyro_enable);
+ case ATTR_ACCEL_ENABLE:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->chip_config.accel_enable);
+ case ATTR_IN_POWER_ON:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->power_on_data);
+ case ATTR_DMP_BATCHMODE_TIMEOUT:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->batch.timeout);
+ case ATTR_DMP_PED_ON:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->ped.on);
+ case ATTR_DMP_TILT_ENABLE:
+ return snprintf(buf, MAX_WR_SZ, "%d\n",
+ st->chip_config.tilt_enable);
+ case ATTR_DMP_PICK_UP_ENABLE:
+ return snprintf(buf, MAX_WR_SZ, "%d\n",
+ st->chip_config.pick_up_enable);
+ case ATTR_DMP_EIS_ENABLE:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->chip_config.eis_enable);
+ case ATTR_DMP_LP_EN_OFF:
+ return snprintf(buf, MAX_WR_SZ, "%d\n",
+ st->chip_config.lp_en_mode_off);
+ case ATTR_DMP_STEP_COUNTER_ON:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_counter_l_on);
+ case ATTR_DMP_STEP_COUNTER_WAKE_ON:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_counter_wake_l_on);
+ case ATTR_DMP_STEP_DETECTOR_ON:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_detector_l_on);
+ case ATTR_DMP_STEP_DETECTOR_WAKE_ON:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_detector_wake_l_on);
+ case ATTR_GYRO_MATRIX:
+ m = st->plat_data.orientation;
+ return snprintf(buf, MAX_WR_SZ, "%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7],
+ m[8]);
+ case ATTR_ACCEL_MATRIX:
+ m = st->plat_data.orientation;
+ return snprintf(buf, MAX_WR_SZ, "%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7],
+ m[8]);
+ case ATTR_GYRO_SF:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_sf);
+ case ATTR_ANGLVEL_X_ST_CALIBBIAS:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_st_bias[0]);
+ case ATTR_ANGLVEL_Y_ST_CALIBBIAS:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_st_bias[1]);
+ case ATTR_ANGLVEL_Z_ST_CALIBBIAS:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_st_bias[2]);
+ case ATTR_ACCEL_X_ST_CALIBBIAS:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->accel_st_bias[0]);
+ case ATTR_ACCEL_Y_ST_CALIBBIAS:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->accel_st_bias[1]);
+ case ATTR_ACCEL_Z_ST_CALIBBIAS:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->accel_st_bias[2]);
+ case ATTR_GYRO_X_OFFSET:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_gyro_bias[0]);
+ case ATTR_GYRO_Y_OFFSET:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_gyro_bias[1]);
+ case ATTR_GYRO_Z_OFFSET:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_gyro_bias[2]);
+ case ATTR_ACCEL_X_OFFSET:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_accel_bias[0]);
+ case ATTR_ACCEL_Y_OFFSET:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_accel_bias[1]);
+ case ATTR_ACCEL_Z_OFFSET:
+ return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_accel_bias[2]);
+ default:
+ return -EPERM;
+ }
+}
+
+static ssize_t inv_self_test(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ int res;
+
+ mutex_lock(&indio_dev->mlock);
+ res = inv_hw_self_test(st);
+ set_inv_enable(indio_dev);
+ mutex_unlock(&indio_dev->mlock);
+
+ return snprintf(buf, MAX_WR_SZ, "%d\n", res);
+}
+
+
+/*
+ * inv_temperature_show() - Read temperature data directly from registers.
+ */
+static ssize_t inv_temperature_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+ u8 data[2];
+ s32 temp;
+ int res;
+
+ mutex_lock(&indio_dev->mlock);
+	res = inv_plat_read(st, REG_RAW_TEMP, 2, data);
+	mutex_unlock(&indio_dev->mlock);
+	if (res)
+		return res;
+
+	temp = (s32)(s16)be16_to_cpup((__be16 *)(data)) * 10000;
+ temp = temp / TEMP_SENSITIVITY + TEMP_OFFSET;
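+	/* The result is in units of 0.01 degC; e.g. raw 0 maps to
+	 * TEMP_OFFSET, i.e. 2500 (25.00 degC).
+	 */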
+
+ return snprintf(buf, MAX_WR_SZ, "%d %lld\n", temp, get_time_ns());
+}
+
+/*
+ * inv_reg_dump_show() - Register dump for testing.
+ */
+static ssize_t inv_reg_dump_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ii;
+ char data;
+ int bytes_printed = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ bytes_printed += snprintf(buf + bytes_printed, MAX_WR_SZ, "bank 0\n");
+
+ for (ii = 0; ii < 0x7F; ii++) {
+ /* don't read fifo r/w register */
+		/* don't read the mem or fifo r/w registers */
+ data = 0;
+ else
+ inv_plat_read(st, ii, 1, &data);
+ bytes_printed += snprintf(buf + bytes_printed, MAX_WR_SZ,
+ "%#2x: %#2x\n", ii, data);
+ }
+ set_inv_enable(indio_dev);
+ mutex_unlock(&indio_dev->mlock);
+
+ return bytes_printed;
+}
+
+static ssize_t inv_flush_batch_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int result, data;
+
+ result = kstrtoint(buf, 10, &data);
+ if (result)
+ return result;
+
+ mutex_lock(&indio_dev->mlock);
+ result = inv_flush_batch_data(indio_dev, data);
+ mutex_unlock(&indio_dev->mlock);
+
+ return count;
+}
+
+static const struct iio_chan_spec inv_mpu_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU_SCAN_TIMESTAMP),
+};
+
+/* special run time sysfs entry, read only */
+static DEVICE_ATTR(debug_reg_dump, S_IRUGO | S_IWUSR, inv_reg_dump_show, NULL);
+static DEVICE_ATTR(out_temperature, S_IRUGO | S_IWUSR,
+ inv_temperature_show, NULL);
+static DEVICE_ATTR(misc_self_test, S_IRUGO | S_IWUSR, inv_self_test, NULL);
+
+static IIO_DEVICE_ATTR(info_anglvel_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_GYRO_MATRIX);
+static IIO_DEVICE_ATTR(info_accel_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_ACCEL_MATRIX);
+
+static IIO_DEVICE_ATTR(info_gyro_sf, S_IRUGO, inv_attr_show, NULL,
+ ATTR_GYRO_SF);
+/* write only sysfs */
+static DEVICE_ATTR(misc_flush_batch, S_IWUSR, NULL, inv_flush_batch_store);
+
+/* sensor on/off sysfs control */
+static IIO_DEVICE_ATTR(in_accel_enable, S_IRUGO | S_IWUSR,
+ inv_sensor_on_show, inv_sensor_on_store, SENSOR_L_ACCEL);
+static IIO_DEVICE_ATTR(in_anglvel_enable, S_IRUGO | S_IWUSR,
+ inv_sensor_on_show, inv_sensor_on_store, SENSOR_L_GYRO);
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(in_eis_enable, S_IRUGO | S_IWUSR,
+ inv_sensor_on_show, inv_sensor_on_store,
+ SENSOR_L_EIS_GYRO);
+#endif
+static IIO_DEVICE_ATTR(in_accel_wake_enable, S_IRUGO | S_IWUSR,
+ inv_sensor_on_show, inv_sensor_on_store,
+ SENSOR_L_ACCEL_WAKE);
+static IIO_DEVICE_ATTR(in_anglvel_wake_enable, S_IRUGO | S_IWUSR,
+ inv_sensor_on_show, inv_sensor_on_store,
+ SENSOR_L_GYRO_WAKE);
+
+/* sensor rate sysfs control */
+static IIO_DEVICE_ATTR(in_accel_rate, S_IRUGO | S_IWUSR,
+ inv_sensor_rate_show, inv_sensor_rate_store,
+ SENSOR_L_ACCEL);
+static IIO_DEVICE_ATTR(in_anglvel_rate, S_IRUGO | S_IWUSR, inv_sensor_rate_show,
+ inv_sensor_rate_store, SENSOR_L_GYRO);
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(in_eis_rate, S_IRUGO | S_IWUSR,
+ inv_sensor_rate_show, inv_sensor_rate_store,
+ SENSOR_L_EIS_GYRO);
+#endif
+static IIO_DEVICE_ATTR(in_accel_wake_rate, S_IRUGO | S_IWUSR,
+ inv_sensor_rate_show, inv_sensor_rate_store,
+ SENSOR_L_ACCEL_WAKE);
+static IIO_DEVICE_ATTR(in_anglvel_wake_rate, S_IRUGO | S_IWUSR,
+ inv_sensor_rate_show, inv_sensor_rate_store,
+ SENSOR_L_GYRO_WAKE);
+
+static IIO_DEVICE_ATTR(misc_batchmode_timeout, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_DMP_BATCHMODE_TIMEOUT);
+
+/* engine scale */
+static IIO_DEVICE_ATTR(in_accel_scale, S_IRUGO | S_IWUSR, inv_attr_show,
+ inv_misc_attr_store, ATTR_ACCEL_SCALE);
+static IIO_DEVICE_ATTR(in_anglvel_scale, S_IRUGO | S_IWUSR, inv_attr_show,
+ inv_misc_attr_store, ATTR_GYRO_SCALE);
+
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(debug_lp_en_off, S_IRUGO | S_IWUSR, inv_attr_show,
+ inv_debug_store, ATTR_DMP_LP_EN_OFF);
+static IIO_DEVICE_ATTR(debug_clock_sel, S_IRUGO | S_IWUSR, inv_attr_show,
+ inv_debug_store, ATTR_DMP_CLK_SEL);
+static IIO_DEVICE_ATTR(debug_reg_write, S_IRUGO | S_IWUSR, inv_attr_show,
+ inv_debug_store, ATTR_DEBUG_REG_WRITE);
+static IIO_DEVICE_ATTR(debug_reg_write_addr, S_IRUGO | S_IWUSR, inv_attr_show,
+ inv_debug_store, ATTR_DEBUG_REG_ADDR);
+#endif
+
+static IIO_DEVICE_ATTR(in_accel_x_st_calibbias, S_IRUGO | S_IWUSR,
+ inv_attr_show, NULL, ATTR_ACCEL_X_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_accel_y_st_calibbias, S_IRUGO | S_IWUSR,
+ inv_attr_show, NULL, ATTR_ACCEL_Y_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_accel_z_st_calibbias, S_IRUGO | S_IWUSR,
+ inv_attr_show, NULL, ATTR_ACCEL_Z_ST_CALIBBIAS);
+
+static IIO_DEVICE_ATTR(in_anglvel_x_st_calibbias, S_IRUGO | S_IWUSR,
+ inv_attr_show, NULL, ATTR_ANGLVEL_X_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_anglvel_y_st_calibbias, S_IRUGO | S_IWUSR,
+ inv_attr_show, NULL, ATTR_ANGLVEL_Y_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_anglvel_z_st_calibbias, S_IRUGO | S_IWUSR,
+ inv_attr_show, NULL, ATTR_ANGLVEL_Z_ST_CALIBBIAS);
+
+static IIO_DEVICE_ATTR(in_accel_x_offset, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_bias_store, ATTR_ACCEL_X_OFFSET);
+static IIO_DEVICE_ATTR(in_accel_y_offset, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_bias_store, ATTR_ACCEL_Y_OFFSET);
+static IIO_DEVICE_ATTR(in_accel_z_offset, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_bias_store, ATTR_ACCEL_Z_OFFSET);
+
+static IIO_DEVICE_ATTR(in_anglvel_x_offset, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_bias_store, ATTR_GYRO_X_OFFSET);
+static IIO_DEVICE_ATTR(in_anglvel_y_offset, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_bias_store, ATTR_GYRO_Y_OFFSET);
+static IIO_DEVICE_ATTR(in_anglvel_z_offset, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_bias_store, ATTR_GYRO_Z_OFFSET);
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(in_step_detector_enable, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_DMP_STEP_DETECTOR_ON);
+static IIO_DEVICE_ATTR(in_step_detector_wake_enable, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_DMP_STEP_DETECTOR_WAKE_ON);
+static IIO_DEVICE_ATTR(in_step_counter_enable, S_IRUGO | S_IWUSR, inv_attr_show,
+ inv_basic_attr_store, ATTR_DMP_STEP_COUNTER_ON);
+static IIO_DEVICE_ATTR(in_step_counter_wake_enable, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_DMP_STEP_COUNTER_WAKE_ON);
+
+static IIO_DEVICE_ATTR(event_tilt_enable, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_DMP_TILT_ENABLE);
+
+static IIO_DEVICE_ATTR(event_eis_enable, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_DMP_EIS_ENABLE);
+
+static IIO_DEVICE_ATTR(event_pick_up_enable, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_DMP_PICK_UP_ENABLE);
+
+static IIO_DEVICE_ATTR(in_power_on, S_IRUGO | S_IWUSR,
+ inv_attr_show, inv_basic_attr_store,
+ ATTR_IN_POWER_ON);
+#endif
+
+static const struct attribute *inv_raw_attributes[] = {
+ &dev_attr_debug_reg_dump.attr,
+ &dev_attr_out_temperature.attr,
+ &dev_attr_misc_flush_batch.attr,
+ &dev_attr_misc_self_test.attr,
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+ &iio_dev_attr_in_power_on.dev_attr.attr,
+#endif
+ &iio_dev_attr_in_accel_enable.dev_attr.attr,
+ &iio_dev_attr_in_accel_wake_enable.dev_attr.attr,
+ &iio_dev_attr_info_accel_matrix.dev_attr.attr,
+ &iio_dev_attr_in_accel_scale.dev_attr.attr,
+ &iio_dev_attr_misc_batchmode_timeout.dev_attr.attr,
+ &iio_dev_attr_in_accel_rate.dev_attr.attr,
+ &iio_dev_attr_in_accel_wake_rate.dev_attr.attr,
+};
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static const struct attribute *inv_debug_attributes[] = {
+ &iio_dev_attr_debug_lp_en_off.dev_attr.attr,
+ &iio_dev_attr_debug_clock_sel.dev_attr.attr,
+ &iio_dev_attr_debug_reg_write.dev_attr.attr,
+ &iio_dev_attr_debug_reg_write_addr.dev_attr.attr,
+};
+#endif
+
+static const struct attribute *inv_gyro_attributes[] = {
+ &iio_dev_attr_info_anglvel_matrix.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_enable.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_rate.dev_attr.attr,
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+ &iio_dev_attr_in_eis_enable.dev_attr.attr,
+#endif
+ &iio_dev_attr_in_anglvel_wake_enable.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_scale.dev_attr.attr,
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+ &iio_dev_attr_in_eis_rate.dev_attr.attr,
+#endif
+ &iio_dev_attr_in_anglvel_wake_rate.dev_attr.attr,
+ &iio_dev_attr_info_gyro_sf.dev_attr.attr,
+};
+
+static const struct attribute *inv_bias_attributes[] = {
+ &iio_dev_attr_in_accel_x_st_calibbias.dev_attr.attr,
+ &iio_dev_attr_in_accel_y_st_calibbias.dev_attr.attr,
+ &iio_dev_attr_in_accel_z_st_calibbias.dev_attr.attr,
+ &iio_dev_attr_in_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_in_accel_y_offset.dev_attr.attr,
+ &iio_dev_attr_in_accel_z_offset.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_x_st_calibbias.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_y_st_calibbias.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_z_st_calibbias.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_x_offset.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_y_offset.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_z_offset.dev_attr.attr,
+};
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static const struct attribute *inv_pedometer_attributes[] = {
+ &iio_dev_attr_event_tilt_enable.dev_attr.attr,
+ &iio_dev_attr_event_eis_enable.dev_attr.attr,
+ &iio_dev_attr_event_pick_up_enable.dev_attr.attr,
+ &iio_dev_attr_in_step_counter_enable.dev_attr.attr,
+ &iio_dev_attr_in_step_counter_wake_enable.dev_attr.attr,
+ &iio_dev_attr_in_step_detector_enable.dev_attr.attr,
+ &iio_dev_attr_in_step_detector_wake_enable.dev_attr.attr,
+};
+#endif
+
+static struct attribute *inv_attributes[ARRAY_SIZE(inv_raw_attributes) +
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+ ARRAY_SIZE(inv_debug_attributes) +
+#endif
+ ARRAY_SIZE(inv_gyro_attributes) +
+ ARRAY_SIZE(inv_bias_attributes) +
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+ ARRAY_SIZE(inv_pedometer_attributes) +
+#endif
+ + 1];
+
+static const struct attribute_group inv_attribute_group = {
+ .name = "mpu",
+ .attrs = inv_attributes
+};
+
+static const struct iio_info mpu_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &inv_attribute_group,
+};
+
+/*
+ * inv_check_chip_type() - check and setup chip type.
+ */
+int inv_check_chip_type(struct iio_dev *indio_dev, const char *name)
+{
+ int result;
+ int t_ind;
+ struct inv_chip_config_s *conf;
+ struct mpu_platform_data *plat;
+ struct inv_mpu_state *st;
+
+ st = iio_priv(indio_dev);
+ conf = &st->chip_config;
+ plat = &st->plat_data;
+
+ if (!strcmp(name, "iam20680"))
+ st->chip_type = IAM20680;
+ else
+ return -EPERM;
+ st->chip_config.has_gyro = 1;
+
+ st->hw = &hw_info[st->chip_type];
+ result = inv_mpu_initialize(st);
+ if (result)
+ return result;
+
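+	/* Concatenate the per-feature attribute tables into the single
+	 * NULL-terminated inv_attributes[] array used by the "mpu"
+	 * attribute group.
+	 */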
+ t_ind = 0;
+ memcpy(&inv_attributes[t_ind], inv_raw_attributes,
+ sizeof(inv_raw_attributes));
+ t_ind += ARRAY_SIZE(inv_raw_attributes);
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+ memcpy(&inv_attributes[t_ind], inv_pedometer_attributes,
+ sizeof(inv_pedometer_attributes));
+ t_ind += ARRAY_SIZE(inv_pedometer_attributes);
+#endif
+
+ memcpy(&inv_attributes[t_ind], inv_gyro_attributes,
+ sizeof(inv_gyro_attributes));
+ t_ind += ARRAY_SIZE(inv_gyro_attributes);
+
+ memcpy(&inv_attributes[t_ind], inv_bias_attributes,
+ sizeof(inv_bias_attributes));
+ t_ind += ARRAY_SIZE(inv_bias_attributes);
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+ memcpy(&inv_attributes[t_ind], inv_debug_attributes,
+ sizeof(inv_debug_attributes));
+ t_ind += ARRAY_SIZE(inv_debug_attributes);
+#endif
+
+ inv_attributes[t_ind] = NULL;
+
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+
+ indio_dev->info = &mpu_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(inv_check_chip_type);
+
+int inv_create_dmp_sysfs(struct iio_dev *ind)
+{
+ // dummy
+ return 0;
+}
+EXPORT_SYMBOL_GPL(inv_create_dmp_sysfs);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device ICM20xxx driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_iio_reg_20680.h b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_iio_reg_20680.h
new file mode 100644
index 000000000000..3f8ce71be024
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_iio_reg_20680.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _INV_MPU_IIO_REG_20680_H_
+#define _INV_MPU_IIO_REG_20680_H_
+
+/* Define this when the HAL does not support the algorithm library for
+ * calibration and sensor fusion, so that unused sysfs entries are not
+ * exposed. */
+#define SUPPORT_ONLY_BASIC_FEATURES
+
+/* Uncomment to read data registers for sensor data instead of FIFO */
+//#define SENSOR_DATA_FROM_REGISTERS
+
+/* Uncomment to enable timer based batching */
+#define TIMER_BASED_BATCHING
+
+/* Timer-based batching (polling) requires reading from the FIFO, so it is
+ * disabled when sensor data is read from registers. */
+#if defined(SENSOR_DATA_FROM_REGISTERS)
+#undef TIMER_BASED_BATCHING
+#endif
+
+/*register and associated bit definition*/
+#define REG_XA_OFFS_H 0x77
+#define REG_YA_OFFS_H 0x7A
+#define REG_ZA_OFFS_H 0x7D
+#define REG_XG_OFFS_USR_H 0x13
+#define REG_YG_OFFS_USR_H 0x15
+#define REG_ZG_OFFS_USR_H 0x17
+#define REG_SAMPLE_RATE_DIV 0x19
+
+#define REG_CONFIG 0x1A
+#define EXT_SYNC_SET 8
+
+#define REG_GYRO_CONFIG 0x1B
+#define BITS_SELF_TEST_EN 0xE0
+#define SHIFT_GYRO_FS_SEL 0x03
+
+#define REG_ACCEL_CONFIG 0x1C
+#define SHIFT_ACCEL_FS 0x03
+
+#define REG_LP_MODE_CTRL 0x1E
+#define BIT_GYRO_CYCLE_EN 0x80
+
+#define REG_ACCEL_WOM_THR 0x1F
+#define REG_ACCEL_WOM_X_THR 0x20
+#define REG_ACCEL_WOM_Y_THR 0x21
+#define REG_ACCEL_WOM_Z_THR 0x22
+
+#define REG_ACCEL_MOT_THR 0x1F
+#define REG_ACCEL_MOT_DUR 0x20
+
+#define REG_ACCEL_CONFIG_2 0x1D
+#define BIT_ACCEL_FCHOCIE_B 0x08
+
+#define REG_FIFO_EN 0x23
+#define BITS_GYRO_FIFO_EN 0x70
+#define BIT_ACCEL_FIFO_EN 0x08
+
+#define REG_FSYNC_INT 0x36
+#define BIT_FSYNC_INT 0x80
+
+#define REG_INT_PIN_CFG 0x37
+
+#define REG_INT_ENABLE 0x38
+#define BIT_WOM_X_INT_EN 0x80
+#define BIT_WOM_Y_INT_EN 0x40
+#define BIT_WOM_Z_INT_EN 0x20
+#define BIT_WOM_ALL_INT_EN 0xE0
+#define BIT_FSYNC_INT_EN 0x8
+#define BIT_DATA_RDY_EN 0x1
+
+#define REG_INT_STATUS 0x3A
+#define BIT_WOM_X_INT 0x80
+#define BIT_WOM_Y_INT 0x40
+#define BIT_WOM_Z_INT 0x20
+
+#define REG_RAW_ACCEL 0x3B
+#define REG_RAW_TEMP 0x41
+#define REG_RAW_GYRO 0x43
+#define REG_EXT_SENS_DATA_00 0x49
+#define REG_EXT_SENS_DATA_08 0x51
+#define REG_EXT_SENS_DATA_09 0x52
+
+#define REG_ACCEL_INTEL_CTRL 0x69
+#define BIT_ACCEL_INTEL_EN 0x80
+#define BIT_ACCEL_INTEL_MODE 0x40
+
+#define REG_USER_CTRL 0x6A
+#define BIT_COND_RST 0x01
+#define BIT_FIFO_RST 0x04
+#define BIT_FIFO_EN 0x40
+
+#define REG_PWR_MGMT_1 0x6B
+#define BIT_H_RESET 0x80
+#define BIT_SLEEP 0x40
+#define BIT_LP_EN 0x20
+#define BIT_CLK_PLL 0x01
+#define BIT_CLK_MASK 0x07
+
+#define REG_PWR_MGMT_2 0x6C
+#define BIT_PWR_ACCEL_STBY 0x38
+#define BIT_PWR_GYRO_STBY 0x07
+#define BIT_PWR_ALL_OFF 0x3F
+#define BIT_FIFO_LP_EN 0x80
+
+#define REG_MEM_BANK_SEL 0x6D
+#define REG_MEM_START_ADDR 0x6E
+#define REG_MEM_R_W 0x6F
+
+#define REG_FIFO_COUNT_H 0x72
+#define REG_FIFO_R_W 0x74
+#define REG_WHO_AM_I 0x75
+
+#define REG_6500_XG_ST_DATA 0x50
+#define REG_6500_XA_ST_DATA 0xD
+#define REG_6500_XA_OFFS_H 0x77
+#define REG_6500_YA_OFFS_H 0x7A
+#define REG_6500_ZA_OFFS_H 0x7D
+#define REG_6500_ACCEL_CONFIG2 0x1D
+#define BIT_ACCEL_FCHOCIE_B 0x08
+#define BIT_FIFO_SIZE_1K 0x40
+
+#define REG_LP_MODE_CFG 0x1E
+
+#define REG_6500_LP_ACCEL_ODR 0x1E
+#define REG_6500_ACCEL_WOM_THR 0x1F
+
+/* data output control reg 2 */
+#define ACCEL_ACCURACY_SET 0x4000
+#define GYRO_ACCURACY_SET 0x2000
+#define CPASS_ACCURACY_SET 0x1000
+
+/* data definitions */
+#define ACCEL_COVARIANCE 0
+#define BYTES_PER_SENSOR 6
+#define BYTES_FOR_TEMP 2
+#define FIFO_COUNT_BYTE 2
+#define HARDWARE_FIFO_SIZE 512
+#define FIFO_SIZE (HARDWARE_FIFO_SIZE * 7 / 10)
+#define POWER_UP_TIME 100
+#define REG_UP_TIME_USEC 100
+#define LEFT_OVER_BYTES 128
+#define IIO_BUFFER_BYTES 8
+#define BASE_SAMPLE_RATE 1000
+#define DRY_RUN_TIME 50
+#define INV_IAM20680_GYRO_START_TIME 35
+#define INV_IAM20680_ACCEL_START_TIME 30
+#define MODE_1K_INIT_SAMPLE 5
+#define FIRST_SAMPLE_BUF_MS 30
+
+#ifdef BIAS_CONFIDENCE_HIGH
+#define DEFAULT_ACCURACY 3
+#else
+#define DEFAULT_ACCURACY 1
+#endif
+
+/* temperature */
+#define TEMP_SENSITIVITY 32680 // 326.8 LSB/degC * 100
+#define TEMP_OFFSET 2500 // 25 degC * 100
+
+/* enum for sensor */
+enum INV_SENSORS {
+ SENSOR_ACCEL = 0,
+ SENSOR_TEMP,
+ SENSOR_GYRO,
+ SENSOR_COMPASS,
+ SENSOR_NUM_MAX,
+ SENSOR_INVALID,
+};
+
+enum inv_filter_e {
+ INV_FILTER_256HZ_NOLPF2 = 0,
+ INV_FILTER_188HZ,
+ INV_FILTER_98HZ,
+ INV_FILTER_42HZ,
+ INV_FILTER_20HZ,
+ INV_FILTER_10HZ,
+ INV_FILTER_5HZ,
+ INV_FILTER_2100HZ_NOLPF,
+ NUM_FILTER
+};
+
+#define MPU_DEFAULT_DMP_FREQ 200
+#define PEDOMETER_FREQ (MPU_DEFAULT_DMP_FREQ >> 2)
+#define SENSOR_FUSION_MIN_RATE 100
+#define GESTURE_ACCEL_RATE 50
+#define ESI_GYRO_RATE 1000
+#define MAX_FIFO_PACKET_READ 6
+#define MAX_BATCH_FIFO_SIZE FIFO_SIZE
+
+#define MIN_MST_ODR_CONFIG 4
+#define MAX_MST_ODR_CONFIG 5
+/* The initial rate matters. For non-DMP mode it is set to 4 Hz (~1000/256). */
+#define MPU_INIT_SENSOR_RATE 4
+#define MAX_MST_NON_COMPASS_ODR_CONFIG 7
+#define THREE_AXES 3
+#define NINE_ELEM (THREE_AXES * THREE_AXES)
+#define MPU_TEMP_SHIFT 16
+
+#define DMP_DIVIDER (BASE_SAMPLE_RATE / MPU_DEFAULT_DMP_FREQ)
+#define DEFAULT_BATCH_RATE 400
+#define DEFAULT_BATCH_TIME (MSEC_PER_SEC / DEFAULT_BATCH_RATE)
+
+#define TEMPERATURE_SCALE 3340827L
+#define TEMPERATURE_OFFSET 1376256L
+#define SECONDARY_INIT_WAIT 100
+#define MPU_SOFT_REV_ADDR 0x86
+#define MPU_SOFT_REV_MASK 0xf
+#define SW_REV_LP_EN_MODE 4
+
+/* data limit definitions */
+#define MIN_FIFO_RATE 4
+#define MAX_FIFO_RATE MPU_DEFAULT_DMP_FREQ
+
+#define MAX_MPU_MEM 8192
+#define MAX_PRS_RATE 281
+
+enum inv_devices {
+ ICM20608D,
+ ICM20690,
+ ICM20602,
+ IAM20680,
+ INV_NUM_PARTS,
+};
+#endif
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_init_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_init_20680.c
new file mode 100644
index 000000000000..58bd8d073890
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_init_20680.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+#include "../inv_mpu_iio.h"
+
+static int inv_calc_gyro_sf(s8 pll)
+{
+ int a, r;
+ int value, t;
+
+ t = 102870L + 81L * pll;
+ a = (1L << 30) / t;
+ r = (1L << 30) - a * t;
+ value = a * 797 * DMP_DIVIDER;
+ value += (s64) ((a * 1011387LL * DMP_DIVIDER) >> 20);
+ value += r * 797L * DMP_DIVIDER / t;
+ value += (s32) ((s64) ((r * 1011387LL * DMP_DIVIDER) >> 20)) / t;
+ value <<= 1;
+
+ return value;
+}
+
+static int inv_read_timebase(struct inv_mpu_state *st)
+{
+
+ inv_plat_single_write(st, REG_CONFIG, 3);
+
+ st->eng_info[ENGINE_ACCEL].base_time = NSEC_PER_SEC;
+ st->eng_info[ENGINE_ACCEL].base_time_1k = NSEC_PER_SEC;
+	/* Taylor expansion to calculate base time unit */
+ st->eng_info[ENGINE_GYRO].base_time = NSEC_PER_SEC;
+ st->eng_info[ENGINE_GYRO].base_time_1k = NSEC_PER_SEC;
+ st->eng_info[ENGINE_I2C].base_time = NSEC_PER_SEC;
+ st->eng_info[ENGINE_I2C].base_time_1k = NSEC_PER_SEC;
+
+ st->eng_info[ENGINE_ACCEL].orig_rate = BASE_SAMPLE_RATE;
+ st->eng_info[ENGINE_GYRO].orig_rate = BASE_SAMPLE_RATE;
+ st->eng_info[ENGINE_I2C].orig_rate = BASE_SAMPLE_RATE;
+
+ st->gyro_sf = inv_calc_gyro_sf(0);
+
+ return 0;
+}
+
+int inv_set_gyro_sf(struct inv_mpu_state *st)
+{
+ int result;
+
+ result = inv_plat_single_write(st, REG_GYRO_CONFIG,
+ st->chip_config.fsr << SHIFT_GYRO_FS_SEL);
+
+ return result;
+}
+
+int inv_set_accel_sf(struct inv_mpu_state *st)
+{
+ int result;
+
+ result = inv_plat_single_write(st, REG_ACCEL_CONFIG,
+ st->chip_config.accel_fs << SHIFT_ACCEL_FS);
+ return result;
+}
+
+// dummy for 20602
+int inv_set_accel_intel(struct inv_mpu_state *st)
+{
+ return 0;
+}
+
+static void inv_init_sensor_struct(struct inv_mpu_state *st)
+{
+ int i;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++)
+ st->sensor[i].rate = MPU_INIT_SENSOR_RATE;
+
+ st->sensor[SENSOR_ACCEL].sample_size = BYTES_PER_SENSOR;
+ st->sensor[SENSOR_TEMP].sample_size = BYTES_FOR_TEMP;
+ st->sensor[SENSOR_GYRO].sample_size = BYTES_PER_SENSOR;
+
+ st->sensor_l[SENSOR_L_SIXQ].base = SENSOR_GYRO;
+ st->sensor_l[SENSOR_L_PEDQ].base = SENSOR_GYRO;
+
+ st->sensor_l[SENSOR_L_SIXQ_WAKE].base = SENSOR_GYRO;
+ st->sensor_l[SENSOR_L_PEDQ_WAKE].base = SENSOR_GYRO;
+
+ st->sensor[SENSOR_ACCEL].a_en = true;
+ st->sensor[SENSOR_GYRO].a_en = false;
+
+ st->sensor[SENSOR_ACCEL].g_en = false;
+ st->sensor[SENSOR_GYRO].g_en = true;
+
+ st->sensor[SENSOR_ACCEL].c_en = false;
+ st->sensor[SENSOR_GYRO].c_en = false;
+
+ st->sensor[SENSOR_ACCEL].p_en = false;
+ st->sensor[SENSOR_GYRO].p_en = false;
+
+ st->sensor[SENSOR_ACCEL].engine_base = ENGINE_ACCEL;
+ st->sensor[SENSOR_GYRO].engine_base = ENGINE_GYRO;
+
+ st->sensor_l[SENSOR_L_ACCEL].base = SENSOR_ACCEL;
+ st->sensor_l[SENSOR_L_GESTURE_ACCEL].base = SENSOR_ACCEL;
+ st->sensor_l[SENSOR_L_GYRO].base = SENSOR_GYRO;
+ st->sensor_l[SENSOR_L_GYRO_CAL].base = SENSOR_GYRO;
+ st->sensor_l[SENSOR_L_EIS_GYRO].base = SENSOR_GYRO;
+
+ st->sensor_l[SENSOR_L_ACCEL_WAKE].base = SENSOR_ACCEL;
+ st->sensor_l[SENSOR_L_GYRO_WAKE].base = SENSOR_GYRO;
+
+ st->sensor_l[SENSOR_L_GYRO_CAL_WAKE].base = SENSOR_GYRO;
+
+ st->sensor_l[SENSOR_L_ACCEL].header = ACCEL_HDR;
+ st->sensor_l[SENSOR_L_GESTURE_ACCEL].header = ACCEL_HDR;
+ st->sensor_l[SENSOR_L_GYRO].header = GYRO_HDR;
+ st->sensor_l[SENSOR_L_GYRO_CAL].header = GYRO_CALIB_HDR;
+
+ st->sensor_l[SENSOR_L_EIS_GYRO].header = EIS_GYRO_HDR;
+ st->sensor_l[SENSOR_L_SIXQ].header = SIXQUAT_HDR;
+ st->sensor_l[SENSOR_L_THREEQ].header = LPQ_HDR;
+ st->sensor_l[SENSOR_L_NINEQ].header = NINEQUAT_HDR;
+ st->sensor_l[SENSOR_L_PEDQ].header = PEDQUAT_HDR;
+
+ st->sensor_l[SENSOR_L_ACCEL_WAKE].header = ACCEL_WAKE_HDR;
+ st->sensor_l[SENSOR_L_GYRO_WAKE].header = GYRO_WAKE_HDR;
+ st->sensor_l[SENSOR_L_GYRO_CAL_WAKE].header = GYRO_CALIB_WAKE_HDR;
+ st->sensor_l[SENSOR_L_MAG_WAKE].header = COMPASS_WAKE_HDR;
+ st->sensor_l[SENSOR_L_MAG_CAL_WAKE].header = COMPASS_CALIB_WAKE_HDR;
+ st->sensor_l[SENSOR_L_SIXQ_WAKE].header = SIXQUAT_WAKE_HDR;
+ st->sensor_l[SENSOR_L_NINEQ_WAKE].header = NINEQUAT_WAKE_HDR;
+ st->sensor_l[SENSOR_L_PEDQ_WAKE].header = PEDQUAT_WAKE_HDR;
+
+ st->sensor_l[SENSOR_L_ACCEL].wake_on = false;
+ st->sensor_l[SENSOR_L_GYRO].wake_on = false;
+ st->sensor_l[SENSOR_L_GYRO_CAL].wake_on = false;
+ st->sensor_l[SENSOR_L_MAG].wake_on = false;
+ st->sensor_l[SENSOR_L_MAG_CAL].wake_on = false;
+ st->sensor_l[SENSOR_L_EIS_GYRO].wake_on = false;
+ st->sensor_l[SENSOR_L_SIXQ].wake_on = false;
+ st->sensor_l[SENSOR_L_NINEQ].wake_on = false;
+ st->sensor_l[SENSOR_L_PEDQ].wake_on = false;
+
+ st->sensor_l[SENSOR_L_ACCEL_WAKE].wake_on = true;
+ st->sensor_l[SENSOR_L_GYRO_WAKE].wake_on = true;
+ st->sensor_l[SENSOR_L_GYRO_CAL_WAKE].wake_on = true;
+ st->sensor_l[SENSOR_L_MAG_WAKE].wake_on = true;
+ st->sensor_l[SENSOR_L_SIXQ_WAKE].wake_on = true;
+ st->sensor_l[SENSOR_L_NINEQ_WAKE].wake_on = true;
+ st->sensor_l[SENSOR_L_PEDQ_WAKE].wake_on = true;
+}
+
+static int inv_init_config(struct inv_mpu_state *st)
+{
+ int res, i;
+
+ st->batch.overflow_on = 0;
+ st->chip_config.fsr = MPU_INIT_GYRO_SCALE;
+ st->chip_config.accel_fs = MPU_INIT_ACCEL_SCALE;
+ st->ped.int_thresh = MPU_INIT_PED_INT_THRESH;
+ st->ped.step_thresh = MPU_INIT_PED_STEP_THRESH;
+ st->chip_config.low_power_gyro_on = 1;
+ st->eis.count_precision = NSEC_PER_MSEC;
+ st->firmware = 0;
+ st->fifo_count_mode = BYTE_MODE;
+#ifdef TIMER_BASED_BATCHING
+ st->batch_timeout = 0;
+ st->is_batch_timer_running = false;
+#endif
+
+ st->eng_info[ENGINE_GYRO].base_time = NSEC_PER_SEC;
+ st->eng_info[ENGINE_ACCEL].base_time = NSEC_PER_SEC;
+
+ inv_init_sensor_struct(st);
+ res = inv_read_timebase(st);
+ if (res)
+ return res;
+
+ res = inv_set_gyro_sf(st);
+ if (res)
+ return res;
+ res = inv_set_accel_sf(st);
+ if (res)
+ return res;
+ res = inv_set_accel_intel(st);
+ if (res)
+ return res;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++)
+ st->sensor[i].ts = 0;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++)
+ st->sensor[i].previous_ts = 0;
+
+ return res;
+}
+
+int inv_mpu_initialize(struct inv_mpu_state *st)
+{
+ u8 v;
+ int result;
+ struct inv_chip_config_s *conf;
+ struct mpu_platform_data *plat;
+
+ conf = &st->chip_config;
+ plat = &st->plat_data;
+
+ /* verify whoami */
+ result = inv_plat_read(st, REG_WHO_AM_I, 1, &v);
+ if (result)
+ return result;
+ pr_info("whoami= %x\n", v);
+ if (v == 0x00 || v == 0xff)
+ return -ENODEV;
+
+	/* reset to make sure no previous state is left over */
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_H_RESET);
+ if (result)
+ return result;
+ usleep_range(REG_UP_TIME_USEC, REG_UP_TIME_USEC);
+ msleep(100);
+ /* toggle power state */
+ result = inv_set_power(st, false);
+ if (result)
+ return result;
+ result = inv_set_power(st, true);
+ if (result)
+ return result;
+
+ result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis);
+ if (result)
+ return result;
+ result = inv_init_config(st);
+ if (result)
+ return result;
+
+ result = mem_r(MPU_SOFT_REV_ADDR, 1, &v);
+ pr_info("sw_rev=%x, res=%d\n", v, result);
+ if (result)
+ return result;
+ st->chip_config.lp_en_mode_off = 0;
+
+ pr_info("%s: Mask %X, v = %X, lp mode = %d\n", __func__,
+ MPU_SOFT_REV_MASK, v, st->chip_config.lp_en_mode_off);
+ result = inv_set_power(st, false);
+
+ pr_info("%s: initialize result is %d....\n", __func__, result);
+ return 0;
+}
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_parsing_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_parsing_20680.c
new file mode 100644
index 000000000000..0f17b6dd2c52
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_parsing_20680.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/math64.h>
+
+#include "../inv_mpu_iio.h"
+
+static char iden[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+
+static int inv_process_gyro(struct inv_mpu_state *st, u8 *d, u64 t)
+{
+ s16 raw[3];
+ s32 calib[3];
+ int i;
+#define BIAS_UNIT 2859
+
+ for (i = 0; i < 3; i++)
+ raw[i] = be16_to_cpup((__be16 *) (d + i * 2));
+
+ for (i = 0; i < 3; i++)
+ calib[i] = (raw[i] << 15);
+
+
+ inv_push_gyro_data(st, raw, calib, t);
+
+ return 0;
+}
+
+static int inv_check_fsync(struct inv_mpu_state *st, u8 fsync_status)
+{
+ u8 data[1];
+
+ if (!st->chip_config.eis_enable)
+ return 0;
+ inv_plat_read(st, REG_FSYNC_INT, 1, data);
+ if (data[0] & BIT_FSYNC_INT) {
+ pr_debug("fsync\n");
+ st->eis.eis_triggered = true;
+ st->eis.fsync_delay = 1;
+ st->eis.prev_state = 1;
+ st->eis.frame_count++;
+ st->eis.eis_frame = true;
+ }
+ st->header_count--;
+
+ return 0;
+}
+
+static int inv_push_sensor(struct inv_mpu_state *st, int ind, u64 t, u8 *d)
+{
+#ifdef ACCEL_BIAS_TEST
+ s16 acc[3], avg[3];
+#endif
+
+ switch (ind) {
+ case SENSOR_ACCEL:
+ inv_convert_and_push_8bytes(st, ind, d, t, iden);
+#ifdef ACCEL_BIAS_TEST
+ acc[0] = be16_to_cpup((__be16 *) (d));
+ acc[1] = be16_to_cpup((__be16 *) (d + 2));
+ acc[2] = be16_to_cpup((__be16 *) (d + 4));
+		if (inv_get_3axis_average(acc, avg, 0)) {
+ pr_debug("accel 200 samples average = %5d, %5d, %5d\n", avg[0], avg[1], avg[2]);
+ }
+#endif
+ break;
+ case SENSOR_TEMP:
+ inv_check_fsync(st, d[1]);
+ break;
+ case SENSOR_GYRO:
+ inv_process_gyro(st, d, t);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int inv_push_20680_data(struct inv_mpu_state *st, u8 *d)
+{
+ u8 *dptr;
+ int i;
+
+ dptr = d;
+
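+	/* Walk one FIFO packet: each enabled sensor contributes
+	 * sample_size bytes, in SENSOR_* enum order.
+	 */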
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on) {
+ inv_get_dmp_ts(st, i);
+ if (st->sensor[i].send && (!st->ts_algo.first_sample)) {
+ st->sensor[i].sample_calib++;
+ inv_push_sensor(st, i, st->sensor[i].ts, dptr);
+ }
+ dptr += st->sensor[i].sample_size;
+ }
+ }
+ if (st->ts_algo.first_sample)
+ st->ts_algo.first_sample--;
+ st->header_count--;
+
+ return 0;
+}
+
+static int inv_process_20680_data(struct inv_mpu_state *st)
+{
+ int total_bytes, tmp, res, fifo_count, pk_size, i;
+ u8 *dptr, *d;
+ u8 data[14];
+ bool done_flag;
+ u8 v;
+#ifdef SENSOR_DATA_FROM_REGISTERS
+ u8 reg;
+ int len;
+#endif
+
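+	/* Gesture-only mode: a wake-on-motion interrupt (re)enables the
+	 * FIFO and the data-ready interrupt and refreshes a countdown;
+	 * once interrupts arrive without motion long enough for the
+	 * countdown to expire, fall back to WOM-only interrupts.
+	 */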
+	if (st->gesture_only_on && (!st->batch.timeout)) {
+ res = inv_plat_read(st, REG_INT_STATUS, 1, data);
+ if (res)
+ return res;
+		pr_debug("ges cnt=%d, status=%x\n",
+ st->gesture_int_count, data[0]);
+ if (data[0] & (BIT_WOM_ALL_INT_EN)) {
+ if (!st->gesture_int_count) {
+ inv_switch_power_in_lp(st, true);
+ res = inv_plat_single_write(st, REG_INT_ENABLE,
+ BIT_WOM_ALL_INT_EN | BIT_DATA_RDY_EN);
+ if (res)
+ return res;
+ v = 0;
+ if (st->chip_config.gyro_enable)
+ v |= BITS_GYRO_FIFO_EN;
+
+ if (st->chip_config.accel_enable)
+ v |= BIT_ACCEL_FIFO_EN;
+ res = inv_plat_single_write(st, REG_FIFO_EN, v);
+ if (res)
+ return res;
+				/* First time wake up from WOM.
+				 * We don't need data in the FIFO.
+				 */
+ res = inv_reset_fifo(st, true);
+ if (res)
+ return res;
+ res = inv_switch_power_in_lp(st, false);
+ st->gesture_int_count = WOM_DELAY_THRESHOLD;
+
+ return res;
+ }
+ st->gesture_int_count = WOM_DELAY_THRESHOLD;
+ } else {
+ if (!st->gesture_int_count) {
+ inv_switch_power_in_lp(st, true);
+ res = inv_plat_single_write(st, REG_FIFO_EN, 0);
+ res = inv_plat_single_write(st, REG_INT_ENABLE,
+ BIT_WOM_ALL_INT_EN);
+ inv_switch_power_in_lp(st, false);
+
+ return res;
+ }
+ st->gesture_int_count--;
+ }
+ }
+
+ fifo_count = inv_get_last_run_time_non_dmp_record_mode(st);
+	pr_debug("fifo count = %d\n", fifo_count);
+ if (!fifo_count) {
+ pr_debug("REG_FIFO_COUNT_H size is 0\n");
+ return 0;
+ }
+ pk_size = st->batch.pk_size;
+ if (!pk_size)
+ return -EINVAL;
+
+ if (fifo_count >= (HARDWARE_FIFO_SIZE / st->batch.pk_size)) {
+ pr_warn("fifo overflow pkt count=%d pkt sz=%d\n", fifo_count, st->batch.pk_size);
+ return -EOVERFLOW;
+ }
+
+ fifo_count *= st->batch.pk_size;
+ st->fifo_count = fifo_count;
+ d = st->fifo_data_store;
+ dptr = d;
+ total_bytes = fifo_count;
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+ len = 0;
+ if (st->sensor[SENSOR_GYRO].on) {
+ reg = REG_RAW_GYRO;
+ len += BYTES_PER_SENSOR;
+ if (st->sensor[SENSOR_ACCEL].on && !st->sensor[SENSOR_TEMP].on)
+ len += BYTES_FOR_TEMP;
+ }
+ if (st->sensor[SENSOR_TEMP].on) {
+ reg = REG_RAW_TEMP;
+ len += BYTES_FOR_TEMP;
+ }
+ if (st->sensor[SENSOR_ACCEL].on) {
+ reg = REG_RAW_ACCEL;
+ len += BYTES_PER_SENSOR;
+ }
+
+ if (len == 0) {
+ pr_debug("No sensor is enabled\n");
+ return 0;
+ }
+
+ /* read data registers */
+ res = inv_plat_read(st, reg, len, data);
+ if (res < 0) {
+		pr_err("reading data registers failed\n");
+ return res;
+ }
+
+ /* copy sensor data to buffer as FIFO data format */
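+	/* With accel, temperature and gyro all enabled this is
+	 * 6 + 2 + 6 = 14 bytes, in the same order the hardware writes
+	 * them into the FIFO.
+	 */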
+ tmp = 0;
+ if (st->sensor[SENSOR_ACCEL].on) {
+ for (i = 0; i < BYTES_PER_SENSOR; i++)
+ dptr[i] = data[tmp + i];
+ dptr += BYTES_PER_SENSOR;
+ tmp += BYTES_PER_SENSOR;
+ }
+
+ if (st->sensor[SENSOR_TEMP].on) {
+ for (i = 0; i < BYTES_FOR_TEMP; i++)
+ dptr[i] = data[tmp + i];
+ dptr += BYTES_FOR_TEMP;
+ tmp += BYTES_FOR_TEMP;
+ }
+
+ if (st->sensor[SENSOR_GYRO].on) {
+ if (st->sensor[SENSOR_ACCEL].on && !st->sensor[SENSOR_TEMP].on)
+ tmp += BYTES_FOR_TEMP;
+ for (i = 0; i < BYTES_PER_SENSOR; i++)
+ dptr[i] = data[tmp + i];
+ }
+#else
+ while (total_bytes > 0) {
+ if (total_bytes < pk_size * MAX_FIFO_PACKET_READ)
+ tmp = total_bytes;
+ else
+ tmp = pk_size * MAX_FIFO_PACKET_READ;
+ res = inv_plat_read(st, REG_FIFO_R_W, tmp, dptr);
+ if (res < 0) {
+			pr_err("reading REG_FIFO_R_W failed\n");
+ return res;
+ }
+ pr_debug("inside: %x, %x, %x, %x, %x, %x, %x, %x\n", dptr[0], dptr[1], dptr[2],
+ dptr[3], dptr[4], dptr[5], dptr[6], dptr[7]);
+ pr_debug("insid2: %x, %x, %x, %x, %x, %x, %x, %x\n", dptr[8], dptr[9], dptr[10],
+ dptr[11], dptr[12], dptr[13], dptr[14], dptr[15]);
+
+ dptr += tmp;
+ total_bytes -= tmp;
+ }
+#endif /* SENSOR_DATA_FROM_REGISTERS */
+ dptr = d;
+ pr_debug("dd: %x, %x, %x, %x, %x, %x, %x, %x\n", d[0], d[1], d[2],
+ d[3], d[4], d[5], d[6], d[7]);
+ pr_debug("dd2: %x, %x, %x, %x, %x, %x, %x, %x\n", d[8], d[9], d[10],
+ d[11], d[12], d[13], d[14], d[15]);
+ total_bytes = fifo_count;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on) {
+ st->sensor[i].count = total_bytes / pk_size;
+ }
+ }
+ st->header_count = 0;
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on)
+ st->header_count = max(st->header_count,
+ st->sensor[i].count);
+ }
+
+ st->ts_algo.calib_counter++;
+ inv_bound_timestamp(st);
+
+ dptr = d;
+ done_flag = false;
+
+ while (!done_flag) {
+ pr_debug("total%d, pk=%d\n", total_bytes, pk_size);
+ if (total_bytes >= pk_size) {
+ res = inv_push_20680_data(st, dptr);
+ if (res)
+ return res;
+ total_bytes -= pk_size;
+ dptr += pk_size;
+ } else {
+ done_flag = true;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * _inv_read_fifo() - Transfer data from FIFO to ring buffer.
+ */
+static void _inv_read_fifo(struct inv_mpu_state *st)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ int result;
+
+ result = wait_event_interruptible_timeout(st->wait_queue,
+ st->resume_state, msecs_to_jiffies(300));
+ if (result <= 0)
+ return;
+ mutex_lock(&indio_dev->mlock);
+#ifdef TIMER_BASED_BATCHING
+ if (st->batch_timeout) {
+ if (inv_plat_single_write(st, REG_INT_ENABLE, st->int_en))
+ pr_err("REG_INT_ENABLE write error\n");
+ }
+#endif
+ st->wake_sensor_received = false;
+ result = inv_process_20680_data(st);
+ if (result)
+ goto err_reset_fifo;
+ mutex_unlock(&indio_dev->mlock);
+
+ if (st->wake_sensor_received)
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_timeout(&st->wake_lock, msecs_to_jiffies(200));
+#else
+ __pm_wakeup_event(&st->wake_lock, 200); /* 200 msecs */
+#endif
+ return;
+
+err_reset_fifo:
+ if ((!st->chip_config.gyro_enable) &&
+ (!st->chip_config.accel_enable) &&
+ (!st->chip_config.slave_enable) &&
+ (!st->chip_config.pressure_enable)) {
+ inv_switch_power_in_lp(st, false);
+ mutex_unlock(&indio_dev->mlock);
+
+ return;
+ }
+
+	pr_err("error, resetting FIFO\n");
+ inv_switch_power_in_lp(st, true);
+ inv_reset_fifo(st, true);
+ inv_switch_power_in_lp(st, false);
+ mutex_unlock(&indio_dev->mlock);
+
+ return;
+}
+
+irqreturn_t inv_read_fifo(int irq, void *dev_id)
+{
+ struct inv_mpu_state *st = (struct inv_mpu_state *)dev_id;
+
+ _inv_read_fifo(st);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef TIMER_BASED_BATCHING
+void inv_batch_work(struct work_struct *work)
+{
+ struct inv_mpu_state *st =
+ container_of(work, struct inv_mpu_state, batch_work);
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+
+ mutex_lock(&indio_dev->mlock);
+ if (inv_plat_single_write(st, REG_INT_ENABLE, st->int_en | BIT_DATA_RDY_EN))
+ pr_err("REG_INT_ENABLE write error\n");
+ mutex_unlock(&indio_dev->mlock);
+
+ return;
+}
+#endif
+
+int inv_flush_batch_data(struct iio_dev *indio_dev, int data)
+{
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+#ifndef SENSOR_DATA_FROM_REGISTERS
+ if (st->chip_config.gyro_enable ||
+ st->chip_config.accel_enable ||
+ st->chip_config.slave_enable ||
+ st->chip_config.pressure_enable) {
+ st->wake_sensor_received = 0;
+ inv_process_20680_data(st);
+ if (st->wake_sensor_received)
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_timeout(&st->wake_lock, msecs_to_jiffies(200));
+#else
+ __pm_wakeup_event(&st->wake_lock, 200); /* 200 msecs */
+#endif
+ inv_switch_power_in_lp(st, false);
+ }
+#endif /* SENSOR_DATA_FROM_REGISTERS */
+ inv_push_marker_to_buffer(st, END_MARKER, data);
+
+ return 0;
+}
+
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_selftest_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_selftest_20680.c
new file mode 100644
index 000000000000..7a90b4d8b882
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_selftest_20680.c
@@ -0,0 +1,752 @@
+/*
+* Copyright (C) 2017-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include "../inv_mpu_iio.h"
+
+/* register settings */
+#define DEF_SELFTEST_GYRO_SENS (32768 / 250)
+/* wait time before collecting data */
+#define MAX_PACKETS 20
+#define SELFTEST_WAIT_TIME (MAX_PACKETS * 10)
+#define DEF_ST_STABLE_TIME 20
+#define DEF_GYRO_SCALE 131
+#define DEF_ST_PRECISION 1000
+#define DEF_ST_ACCEL_FS_MG 2000UL
+#define DEF_ST_SCALE 32768
+#define DEF_ST_TRY_TIMES 2
+#define DEF_ST_ACCEL_RESULT_SHIFT 1
+#define DEF_ST_SAMPLES 200
+
+#define DEF_ACCEL_ST_SHIFT_DELTA_MIN 500
+#define DEF_ACCEL_ST_SHIFT_DELTA_MAX 1500
+#define DEF_GYRO_CT_SHIFT_DELTA 500
+
+#define SENSOR_UP_TIME 30
+#define REG_UP_TIME 2
+
+#define DEF_ACCEL_ST_SHIFT_DELTA 500
+#define ACCEL_ST_AL_MIN ((DEF_ACCEL_ST_AL_MIN * DEF_ST_SCALE \
+ / DEF_ST_ACCEL_FS_MG) * DEF_ST_PRECISION)
+#define ACCEL_ST_AL_MAX ((DEF_ACCEL_ST_AL_MAX * DEF_ST_SCALE \
+ / DEF_ST_ACCEL_FS_MG) * DEF_ST_PRECISION)
+
+#define THREE_AXIS 3
+#define DEF_ST_MPU6500_ACCEL_LPF 2
+#define DEF_SELFTEST_SAMPLE_RATE 0 /* 1000Hz */
+#define DEF_SELFTEST_SAMPLE_RATE_LP 3 /* 250Hz */
+#define DEF_SELFTEST_SAMPLE_RATE_ACC_LP 10 /* 250Hz LPOSC_CLKSEL */
+#define INV_MPU_SAMPLE_RATE_CHANGE_STABLE 50
+#define DEF_SELFTEST_6500_ACCEL_FS (0 << 3)
+#define DEF_SELFTEST_GYRO_FS (0 << 3)
+#define DEF_ST_6500_STABLE_TIME 20
+#define BIT_ACCEL_OUT 0x08
+#define BITS_GYRO_OUT 0x70
+#define DEF_GYRO_WAIT_TIME 10
+#define DEF_GYRO_WAIT_TIME_LP 50
+
+/* Gyro Offset Max Value (dps) */
+#define DEF_GYRO_OFFSET_MAX 20
+/* Gyro Self Test Absolute Limits ST_AL (dps) */
+#define DEF_GYRO_ST_AL 60
+/* Accel Self Test Absolute Limits ST_AL (mg) */
+#define DEF_ACCEL_ST_AL_MIN 225
+#define DEF_ACCEL_ST_AL_MAX 675
+
+struct recover_regs {
+ u8 int_enable; /* REG_INT_ENABLE */
+ u8 fifo_en; /* REG_FIFO_EN */
+ u8 user_ctrl; /* REG_USER_CTRL */
+ u8 config; /* REG_CONFIG */
+ u8 gyro_config; /* REG_GYRO_CONFIG */
+ u8 accel_config; /* REG_ACCEL_CONFIG */
+ u8 accel_config_2; /* REG_ACCEL_CONFIG_2 */
+ u8 smplrt_div; /* REG_SAMPLE_RATE_DIV */
+ u8 lp_mode; /* REG_LP_MODE_CTRL */
+ u8 pwr_mgmt_1; /* REG_PWR_MGMT_1 */
+ u8 pwr_mgmt_2; /* REG_PWR_MGMT_2 */
+};
+
+static struct recover_regs saved_regs;
+
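+/*
+ * Factory self test response lookup table. Per the InvenSense self test
+ * procedure, the 8-bit OTP code read from the ST data registers maps to
+ * an expected response of roughly 2620 * 1.01^(code - 1) LSB; index 0
+ * corresponds to code 1 (a code of 0 means no OTP value was programmed).
+ */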
+static const u16 mpu_st_tb[256] = {
+ 2620, 2646, 2672, 2699, 2726, 2753, 2781, 2808,
+ 2837, 2865, 2894, 2923, 2952, 2981, 3011, 3041,
+ 3072, 3102, 3133, 3165, 3196, 3228, 3261, 3293,
+ 3326, 3359, 3393, 3427, 3461, 3496, 3531, 3566,
+ 3602, 3638, 3674, 3711, 3748, 3786, 3823, 3862,
+ 3900, 3939, 3979, 4019, 4059, 4099, 4140, 4182,
+ 4224, 4266, 4308, 4352, 4395, 4439, 4483, 4528,
+ 4574, 4619, 4665, 4712, 4759, 4807, 4855, 4903,
+ 4953, 5002, 5052, 5103, 5154, 5205, 5257, 5310,
+ 5363, 5417, 5471, 5525, 5581, 5636, 5693, 5750,
+ 5807, 5865, 5924, 5983, 6043, 6104, 6165, 6226,
+ 6289, 6351, 6415, 6479, 6544, 6609, 6675, 6742,
+ 6810, 6878, 6946, 7016, 7086, 7157, 7229, 7301,
+ 7374, 7448, 7522, 7597, 7673, 7750, 7828, 7906,
+ 7985, 8065, 8145, 8227, 8309, 8392, 8476, 8561,
+ 8647, 8733, 8820, 8909, 8998, 9088, 9178, 9270,
+ 9363, 9457, 9551, 9647, 9743, 9841, 9939, 10038,
+ 10139, 10240, 10343, 10446, 10550, 10656, 10763, 10870,
+ 10979, 11089, 11200, 11312, 11425, 11539, 11654, 11771,
+ 11889, 12008, 12128, 12249, 12371, 12495, 12620, 12746,
+ 12874, 13002, 13132, 13264, 13396, 13530, 13666, 13802,
+ 13940, 14080, 14221, 14363, 14506, 14652, 14798, 14946,
+ 15096, 15247, 15399, 15553, 15709, 15866, 16024, 16184,
+ 16346, 16510, 16675, 16842, 17010, 17180, 17352, 17526,
+ 17701, 17878, 18057, 18237, 18420, 18604, 18790, 18978,
+ 19167, 19359, 19553, 19748, 19946, 20145, 20347, 20550,
+ 20756, 20963, 21173, 21385, 21598, 21814, 22033, 22253,
+ 22475, 22700, 22927, 23156, 23388, 23622, 23858, 24097,
+ 24338, 24581, 24827, 25075, 25326, 25579, 25835, 26093,
+ 26354, 26618, 26884, 27153, 27424, 27699, 27976, 28255,
+ 28538, 28823, 29112, 29403, 29697, 29994, 30294, 30597,
+ 30903, 31212, 31524, 31839, 32157, 32479, 32804
+};
+
+static void inv_show_saved_setting(struct inv_mpu_state *st)
+{
+ pr_debug(" REG_INT_ENABLE : 0x%02X\n", saved_regs.int_enable);
+ pr_debug(" REG_FIFO_EN : 0x%02X\n", saved_regs.fifo_en);
+ pr_debug(" REG_USER_CTRL : 0x%02X\n", saved_regs.user_ctrl);
+ pr_debug(" REG_CONFIG : 0x%02X\n", saved_regs.config);
+ pr_debug(" REG_GYRO_CONFIG : 0x%02X\n", saved_regs.gyro_config);
+ pr_debug(" REG_ACCEL_CONFIG : 0x%02X\n", saved_regs.accel_config);
+ pr_debug(" REG_ACCEL_CONFIG_2 : 0x%02X\n", saved_regs.accel_config_2);
+ pr_debug(" REG_SAMPLE_RATE_DIV : 0x%02X\n", saved_regs.smplrt_div);
+ pr_debug(" REG_LP_MODE_CTRL : 0x%02X\n", saved_regs.lp_mode);
+ pr_debug(" REG_PWR_MGMT_1 : 0x%02X\n", saved_regs.pwr_mgmt_1);
+ pr_debug(" REG_PWR_MGMT_2 : 0x%02X\n", saved_regs.pwr_mgmt_2);
+}
+
+static int inv_save_setting(struct inv_mpu_state *st)
+{
+ int result;
+
+ result = inv_plat_read(st, REG_PWR_MGMT_1, 1,
+ &saved_regs.pwr_mgmt_1);
+ if (result)
+ return result;
+
+ /* wake up */
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1,
+ (saved_regs.pwr_mgmt_1 & ~BIT_SLEEP));
+ if (result)
+ return result;
+
+ result = inv_plat_read(st, REG_INT_ENABLE, 1,
+ &saved_regs.int_enable);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_FIFO_EN, 1,
+ &saved_regs.fifo_en);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_USER_CTRL, 1,
+ &saved_regs.user_ctrl);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_CONFIG, 1,
+ &saved_regs.config);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_GYRO_CONFIG, 1,
+ &saved_regs.gyro_config);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_ACCEL_CONFIG, 1,
+ &saved_regs.accel_config);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_ACCEL_CONFIG_2, 1,
+ &saved_regs.accel_config_2);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_SAMPLE_RATE_DIV, 1,
+ &saved_regs.smplrt_div);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_LP_MODE_CTRL, 1,
+ &saved_regs.lp_mode);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_PWR_MGMT_2, 1,
+ &saved_regs.pwr_mgmt_2);
+ if (result)
+ return result;
+
+ inv_show_saved_setting(st);
+
+ return result;
+}
+
+static int inv_recover_setting(struct inv_mpu_state *st)
+{
+ int result;
+ /* Stop sensors */
+ result = inv_plat_single_write(st, REG_PWR_MGMT_2,
+ BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+
+ /* Restore sensor configurations */
+ result = inv_plat_single_write(st, REG_INT_ENABLE,
+ saved_regs.int_enable);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_FIFO_EN,
+ saved_regs.fifo_en);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_USER_CTRL,
+ saved_regs.user_ctrl);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_CONFIG,
+ saved_regs.config);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_GYRO_CONFIG,
+ saved_regs.gyro_config);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_ACCEL_CONFIG,
+ saved_regs.accel_config);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_ACCEL_CONFIG_2,
+ saved_regs.accel_config_2);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV,
+ saved_regs.smplrt_div);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_LP_MODE_CTRL,
+ saved_regs.lp_mode);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1,
+ saved_regs.pwr_mgmt_1);
+ if (result)
+ return result;
+
+ result = inv_plat_single_write(st, REG_PWR_MGMT_2,
+ saved_regs.pwr_mgmt_2);
+ if (result)
+ return result;
+
+ return result;
+}
+
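+/*
+ * inv_switch_engine() - enable or disable the gyro/accel engine by toggling
+ * the standby bits in REG_PWR_MGMT_2. For the gyro engine the clock source
+ * in REG_PWR_MGMT_1 is handled as well: it is reset before turning the gyro
+ * off, and switched to the PLL once the gyro has been on for SENSOR_UP_TIME
+ * and is stable.
+ */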
+int inv_switch_engine(struct inv_mpu_state *st, bool en, u32 mask)
+{
+ u8 data, mgmt_1;
+ int result;
+
+ if (BIT_PWR_GYRO_STBY == mask) {
+ result = inv_plat_read(st, REG_PWR_MGMT_1, 1, &mgmt_1);
+ if (result)
+ return result;
+ mgmt_1 &= ~BIT_CLK_MASK;
+ }
+
+ if ((BIT_PWR_GYRO_STBY == mask) && (!en)) {
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1, mgmt_1);
+ if (result)
+ return result;
+ }
+
+ result = inv_plat_read(st, REG_PWR_MGMT_2, 1, &data);
+ if (result)
+ return result;
+ if (en)
+ data &= (~mask);
+ else
+ data |= mask;
+ data |= BIT_FIFO_LP_EN;
+ result = inv_plat_single_write(st, REG_PWR_MGMT_2, data);
+ if (result)
+ return result;
+
+ if ((BIT_PWR_GYRO_STBY == mask) && en) {
+ /* only gyro on needs sensor up time */
+ msleep(SENSOR_UP_TIME);
+ /* after gyro is on & stable, switch internal clock to PLL */
+ mgmt_1 |= BIT_CLK_PLL;
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1, mgmt_1);
+ if (result)
+ return result;
+ }
+ if ((BIT_PWR_ACCEL_STBY == mask) && en)
+ msleep(REG_UP_TIME);
+
+ return 0;
+}
+
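+/*
+ * inv_set_offset_reg() - write a 16-bit offset value as two consecutive
+ * 8-bit registers, high byte first.
+ */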
+int inv_set_offset_reg(struct inv_mpu_state *st, int reg, int val)
+{
+ int result;
+ u8 d;
+
+ d = ((val >> 8) & 0xff);
+ result = inv_plat_single_write(st, reg, d);
+ if (result)
+ return result;
+
+ d = (val & 0xff);
+ result = inv_plat_single_write(st, reg + 1, d);
+
+ return result;
+}
+
+/**
+* inv_check_gyro_self_test() - check gyro self test. This function
+*                              returns zero on success. A non-zero return
+*                              value indicates a self test failure.
+* @st: main data structure.
+* @reg_avg: average value of the normal test.
+* @st_avg: average value of the self test.
+*/
+int inv_check_gyro_self_test(struct inv_mpu_state *st,
+ int *reg_avg, int *st_avg) {
+ u8 regs[3];
+ int ret_val, result;
+ int otp_value_zero = 0;
+ int st_shift_prod[3], st_shift_cust[3], i;
+
+ ret_val = 0;
+ result = inv_plat_read(st, REG_6500_XG_ST_DATA, 3, regs);
+ if (result)
+ return result;
+ pr_debug("%s self_test gyro shift_code - %02x %02x %02x\n",
+ st->hw->name, regs[0], regs[1], regs[2]);
+
+ for (i = 0; i < 3; i++) {
+ if (regs[i] != 0) {
+ st_shift_prod[i] = mpu_st_tb[regs[i] - 1];
+ } else {
+ st_shift_prod[i] = 0;
+ otp_value_zero = 1;
+ }
+ }
+ pr_debug("%s self_test gyro st_shift_prod - %+d %+d %+d\n",
+ st->hw->name, st_shift_prod[0], st_shift_prod[1],
+ st_shift_prod[2]);
+
+ for (i = 0; i < 3; i++) {
+ st_shift_cust[i] = st_avg[i] - reg_avg[i];
+ if (!otp_value_zero) {
+ /* Self Test Pass/Fail Criteria A */
+ if (st_shift_cust[i] < DEF_GYRO_CT_SHIFT_DELTA
+ * st_shift_prod[i])
+ ret_val = 1;
+ } else {
+ /* Self Test Pass/Fail Criteria B */
+ if (st_shift_cust[i] < DEF_GYRO_ST_AL *
+ DEF_SELFTEST_GYRO_SENS *
+ DEF_ST_PRECISION)
+ ret_val = 1;
+ }
+ }
+ pr_debug("%s self_test gyro st_shift_cust - %+d %+d %+d\n",
+ st->hw->name, st_shift_cust[0], st_shift_cust[1],
+ st_shift_cust[2]);
+
+ if (ret_val == 0) {
+ /* Self Test Pass/Fail Criteria C */
+ for (i = 0; i < 3; i++)
+ if (abs(reg_avg[i]) > DEF_GYRO_OFFSET_MAX *
+ DEF_SELFTEST_GYRO_SENS *
+ DEF_ST_PRECISION)
+ ret_val = 1;
+ }
+
+ return ret_val;
+}
+
+/**
+* inv_check_accel_self_test() - check 6500 accel self test. This function
+*                              returns zero on success. A non-zero return
+*                              value indicates a self test failure.
+* @st: main data structure.
+* @reg_avg: average value of the normal test.
+* @st_avg: average value of the self test.
+*/
+int inv_check_accel_self_test(struct inv_mpu_state *st,
+ int *reg_avg, int *st_avg) {
+ int ret_val, result;
+ int st_shift_prod[3], st_shift_cust[3], st_shift_ratio[3], i;
+ u8 regs[3];
+ int otp_value_zero = 0;
+
+ ret_val = 0;
+ result = inv_plat_read(st, REG_6500_XA_ST_DATA, 3, regs);
+ if (result)
+ return result;
+ pr_debug("%s self_test accel shift_code - %02x %02x %02x\n",
+ st->hw->name, regs[0], regs[1], regs[2]);
+
+ for (i = 0; i < 3; i++) {
+ if (regs[i] != 0) {
+ st_shift_prod[i] = mpu_st_tb[regs[i] - 1];
+ } else {
+ st_shift_prod[i] = 0;
+ otp_value_zero = 1;
+ }
+ }
+ pr_debug("%s self_test accel st_shift_prod - %+d %+d %+d\n",
+ st->hw->name, st_shift_prod[0], st_shift_prod[1],
+ st_shift_prod[2]);
+
+ if (!otp_value_zero) {
+ /* Self Test Pass/Fail Criteria A */
+ for (i = 0; i < 3; i++) {
+ st_shift_cust[i] = st_avg[i] - reg_avg[i];
+ st_shift_ratio[i] = abs(st_shift_cust[i] /
+ st_shift_prod[i] - DEF_ST_PRECISION);
+ if (st_shift_ratio[i] > DEF_ACCEL_ST_SHIFT_DELTA)
+ ret_val = 1;
+ }
+ } else {
+ /* Self Test Pass/Fail Criteria B */
+ for (i = 0; i < 3; i++) {
+ st_shift_cust[i] = abs(st_avg[i] - reg_avg[i]);
+ if (st_shift_cust[i] < ACCEL_ST_AL_MIN ||
+ st_shift_cust[i] > ACCEL_ST_AL_MAX)
+ ret_val = 1;
+ }
+ }
+ pr_debug("%s self_test accel st_shift_cust - %+d %+d %+d\n",
+ st->hw->name, st_shift_cust[0], st_shift_cust[1],
+ st_shift_cust[2]);
+
+ return ret_val;
+}
+
+/*
+ * inv_do_test() - collect the raw data for self test evaluation
+ */
+int inv_do_test(struct inv_mpu_state *st, int self_test_flag,
+ int *gyro_result, int *accel_result, int lp_mode)
+{
+ int result, i, j, packet_size;
+ u8 data[BYTES_PER_SENSOR * 2], d, dd;
+ int fifo_count, packet_count, ind, s;
+
+ packet_size = BYTES_PER_SENSOR * 2;
+
+ /* disable interrupt */
+ result = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+ if (result)
+ return result;
+ /* disable the sensor output to FIFO */
+ result = inv_plat_single_write(st, REG_FIFO_EN, 0);
+ if (result)
+ return result;
+ /* disable fifo reading */
+ result = inv_plat_single_write(st, REG_USER_CTRL, 0);
+ if (result)
+ return result;
+ /* clear FIFO */
+ result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_RST);
+ if (result)
+ return result;
+ /* setup parameters */
+ result = inv_plat_single_write(st, REG_CONFIG, INV_FILTER_98HZ);
+ if (result)
+ return result;
+
+ /* gyro lp mode */
+ if (lp_mode == 1)
+ d = BIT_GYRO_CYCLE_EN;
+ else if (lp_mode == 2)
+ d = DEF_SELFTEST_SAMPLE_RATE_ACC_LP;
+ else
+ d = 0;
+ result = inv_plat_single_write(st, REG_LP_MODE_CTRL, d);
+ if (result)
+ return result;
+
+ /* config accel LPF register */
+ if (lp_mode == 2)
+ d = BIT_ACCEL_FCHOCIE_B;
+ else
+ d = DEF_ST_MPU6500_ACCEL_LPF;
+ result = inv_plat_single_write(st, REG_6500_ACCEL_CONFIG2, d);
+ if (result)
+ return result;
+
+ if (lp_mode) {
+ result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV,
+ DEF_SELFTEST_SAMPLE_RATE_LP);
+ } else {
+ result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV,
+ DEF_SELFTEST_SAMPLE_RATE);
+ }
+ if (result)
+ return result;
+ /* wait for the sampling rate change to stabilize */
+ mdelay(INV_MPU_SAMPLE_RATE_CHANGE_STABLE);
+ result = inv_plat_single_write(st, REG_GYRO_CONFIG,
+ self_test_flag | DEF_SELFTEST_GYRO_FS);
+ if (result)
+ return result;
+
+ d = DEF_SELFTEST_6500_ACCEL_FS;
+ d |= self_test_flag;
+ result = inv_plat_single_write(st, REG_ACCEL_CONFIG, d);
+ if (result)
+ return result;
+
+ /* wait for the output to get stable */
+ msleep(DEF_ST_6500_STABLE_TIME);
+
+ /* enable FIFO reading */
+ result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_EN);
+ if (result)
+ return result;
+ /* enable sensor output to FIFO */
+ d = BITS_GYRO_OUT | BIT_ACCEL_OUT;
+ for (i = 0; i < THREE_AXIS; i++) {
+ gyro_result[i] = 0;
+ accel_result[i] = 0;
+ }
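+	/*
+	 * Collect DEF_ST_SAMPLES packets. Each pass arms the FIFO, waits,
+	 * then drains it; every packet carries BYTES_PER_SENSOR accel bytes
+	 * followed by BYTES_PER_SENSOR gyro bytes, which are accumulated
+	 * and averaged (scaled by DEF_ST_PRECISION) below.
+	 */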
+ s = 0;
+	while (s < DEF_ST_SAMPLES) {
+ /* Stop FIFO */
+ result = inv_plat_single_write(st, REG_USER_CTRL, 0);
+ if (result)
+ return result;
+ /* clear FIFO */
+ result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_RST);
+ if (result)
+ return result;
+ /* enable FIFO reading */
+ result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_EN);
+ if (result)
+ return result;
+
+ /* accel lp mode */
+ dd = BIT_CLK_PLL;
+ if (lp_mode == 2)
+ dd |= BIT_LP_EN;
+ else
+ dd &= ~BIT_LP_EN;
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1, dd);
+ if (result)
+ return result;
+
+ result = inv_plat_single_write(st, REG_FIFO_EN, d);
+ if (result)
+ return result;
+ if (lp_mode)
+ mdelay(DEF_GYRO_WAIT_TIME_LP);
+ else
+ mdelay(DEF_GYRO_WAIT_TIME);
+
+ result = inv_plat_single_write(st, REG_FIFO_EN, 0);
+ if (result)
+ return result;
+
+ result = inv_plat_read(st, REG_FIFO_COUNT_H,
+ FIFO_COUNT_BYTE, data);
+ if (result)
+ return result;
+ fifo_count = be16_to_cpup((__be16 *)(&data[0]));
+ pr_debug("%s self_test fifo_count - %d\n",
+ st->hw->name, fifo_count);
+ packet_count = fifo_count / packet_size;
+ i = 0;
+		while ((i < packet_count) && (s < DEF_ST_SAMPLES)) {
+ short vals[3];
+ result = inv_plat_read(st, REG_FIFO_R_W,
+ packet_size, data);
+ if (result)
+ return result;
+ ind = 0;
+
+ for (j = 0; j < THREE_AXIS; j++) {
+ vals[j] = (short)be16_to_cpup(
+ (__be16 *)(&data[ind + 2 * j]));
+ accel_result[j] += vals[j];
+ }
+ ind += BYTES_PER_SENSOR;
+ pr_debug(
+ "%s self_test accel data - %d %+d %+d %+d",
+ st->hw->name, s, vals[0], vals[1], vals[2]);
+
+ for (j = 0; j < THREE_AXIS; j++) {
+ vals[j] = (short)be16_to_cpup(
+ (__be16 *)(&data[ind + 2 * j]));
+ gyro_result[j] += vals[j];
+ }
+ pr_debug("%s self_test gyro data - %d %+d %+d %+d",
+ st->hw->name, s, vals[0], vals[1], vals[2]);
+
+ s++;
+ i++;
+ }
+ }
+
+ for (j = 0; j < THREE_AXIS; j++) {
+ accel_result[j] = accel_result[j] / s;
+ accel_result[j] *= DEF_ST_PRECISION;
+ }
+ for (j = 0; j < THREE_AXIS; j++) {
+ gyro_result[j] = gyro_result[j] / s;
+ gyro_result[j] *= DEF_ST_PRECISION;
+ }
+
+ return 0;
+}
+
+
+int inv_power_up_self_test(struct inv_mpu_state *st)
+{
+ int result;
+
+	result = inv_switch_power_in_lp(st, true);
+	if (result)
+		return result;
+
+	/* make sure no interrupts */
+	result = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+	if (result)
+		return result;
+ result = inv_switch_engine(st, true, BIT_PWR_ACCEL_STBY);
+ if (result)
+ return result;
+ result = inv_switch_engine(st, true, BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+
+ return 0;
+}
+
+/*
+ * inv_hw_self_test() - main function to do hardware self test
+ */
+int inv_hw_self_test(struct inv_mpu_state *st)
+{
+ int result;
+ int gyro_bias_st[THREE_AXIS], gyro_bias_regular[THREE_AXIS];
+ int accel_bias_st[THREE_AXIS], accel_bias_regular[THREE_AXIS];
+#if 0
+ int gyro_bias_regular_lp[THREE_AXIS];
+ int accel_bias_regular_lp[THREE_AXIS];
+ int dummy_bias_regular[THREE_AXIS];
+#endif
+ int test_times, i;
+ char accel_result, gyro_result;
+
+ result = inv_save_setting(st);
+ if (result)
+ return result;
+
+ result = inv_power_up_self_test(st);
+ if (result)
+ return result;
+ accel_result = 0;
+ gyro_result = 0;
+ test_times = DEF_ST_TRY_TIMES;
+ while (test_times > 0) {
+ result = inv_do_test(st, 0, gyro_bias_regular,
+ accel_bias_regular, 0);
+ if (result == -EAGAIN)
+ test_times--;
+ else
+ test_times = 0;
+ }
+ if (result)
+ goto test_fail;
+ pr_debug("%s self_test accel bias_regular - %+d %+d %+d\n",
+ st->hw->name, accel_bias_regular[0],
+ accel_bias_regular[1], accel_bias_regular[2]);
+ pr_debug("%s self_test gyro bias_regular - %+d %+d %+d\n",
+ st->hw->name, gyro_bias_regular[0], gyro_bias_regular[1],
+ gyro_bias_regular[2]);
+
+ test_times = DEF_ST_TRY_TIMES;
+ while (test_times > 0) {
+ result = inv_do_test(st, BITS_SELF_TEST_EN, gyro_bias_st,
+ accel_bias_st, 0);
+ if (result == -EAGAIN)
+ test_times--;
+ else
+ break;
+ }
+ if (result)
+ goto test_fail;
+ pr_debug("%s self_test accel bias_st - %+d %+d %+d\n",
+ st->hw->name, accel_bias_st[0], accel_bias_st[1],
+ accel_bias_st[2]);
+ pr_debug("%s self_test gyro bias_st - %+d %+d %+d\n",
+ st->hw->name, gyro_bias_st[0], gyro_bias_st[1],
+ gyro_bias_st[2]);
+
+#if 0
+ /* lp gyro mode */
+ test_times = DEF_ST_TRY_TIMES;
+ while (test_times > 0) {
+ result = inv_do_test(st, 0, gyro_bias_regular_lp,
+ dummy_bias_regular, 1);
+ if (result == -EAGAIN)
+ test_times--;
+ else
+ test_times = 0;
+ }
+ if (result)
+ goto test_fail;
+ pr_debug("%s self_test gyro bias_regular lp - %+d %+d %+d\n",
+ st->hw->name, gyro_bias_regular_lp[0], gyro_bias_regular_lp[1],
+ gyro_bias_regular_lp[2]);
+
+ /* lp accel mode */
+ test_times = DEF_ST_TRY_TIMES;
+ while (test_times > 0) {
+ result = inv_do_test(st, 0, dummy_bias_regular,
+ accel_bias_regular_lp, 2);
+ if (result == -EAGAIN)
+ test_times--;
+ else
+ test_times = 0;
+ }
+ if (result)
+ goto test_fail;
+ pr_debug("%s self_test accel bias_regular lp - %+d %+d %+d\n",
+ st->hw->name, accel_bias_regular_lp[0],
+ accel_bias_regular_lp[1], accel_bias_regular_lp[2]);
+#endif
+
+ /* copy bias */
+ for (i = 0; i < 3; i++) {
+ /* gyro : LN bias as LN is default mode */
+ st->gyro_st_bias[i] = gyro_bias_regular[i] / DEF_ST_PRECISION;
+ /* accel : LN bias as LN is default mode */
+ st->accel_st_bias[i] = accel_bias_regular[i] / DEF_ST_PRECISION;
+ }
+
+ /* Check is done on continuous mode data */
+ accel_result = !inv_check_accel_self_test(st,
+ accel_bias_regular, accel_bias_st);
+ gyro_result = !inv_check_gyro_self_test(st,
+ gyro_bias_regular, gyro_bias_st);
+
+test_fail:
+ inv_recover_setting(st);
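+	/* bit 1: accel pass/fail, bit 0: gyro pass/fail (1 = pass) */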
+ return (accel_result << DEF_ST_ACCEL_RESULT_SHIFT) | gyro_result;
+}
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_setup_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_setup_20680.c
new file mode 100644
index 000000000000..5e9cf8906d79
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_setup_20680.c
@@ -0,0 +1,466 @@
+/*
+* Copyright (C) 2017-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+#include "../inv_mpu_iio.h"
+
+/* set LN mode for gyro regardless of conditions */
+#define USE_GYRO_LN_MODE
+
+static int inv_calc_engine_dur(struct inv_engine_info *ei)
+{
+ if (!ei->running_rate)
+ return -EINVAL;
+ ei->dur = ei->base_time / ei->orig_rate;
+ ei->dur *= ei->divider;
+
+ return 0;
+}
+
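+/*
+ * inv_turn_on_fifo() - reset the FIFO and program the interrupt enables,
+ * FIFO enables, WOM (gesture) mode and user control according to the
+ * current sensor/batch configuration; with TIMER_BASED_BATCHING it also
+ * (re)arms or cancels the batch hrtimer.
+ */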
+static int inv_turn_on_fifo(struct inv_mpu_state *st)
+{
+ u8 int_en, fifo_en, mode, user;
+ int r;
+
+ r = inv_plat_single_write(st, REG_FIFO_EN, 0);
+ if (r)
+ return r;
+ r = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_RST);
+ if (r)
+ return r;
+ fifo_en = 0;
+ int_en = 0;
+
+ if (st->gesture_only_on && (!st->batch.timeout)) {
+ st->gesture_int_count = WOM_DELAY_THRESHOLD;
+ int_en |= BIT_WOM_ALL_INT_EN;
+ }
+#ifdef TIMER_BASED_BATCHING
+ if (st->chip_config.eis_enable)
+ int_en |= BIT_FSYNC_INT_EN;
+ if (!st->batch_timeout) {
+ int_en |= BIT_DATA_RDY_EN;
+ }
+#else
+ if (st->batch.timeout) {
+		if (!st->batch.fifo_wm_th)
+ int_en = BIT_DATA_RDY_EN;
+ } else {
+ int_en = BIT_DATA_RDY_EN;
+ if (st->chip_config.eis_enable)
+ int_en |= BIT_FSYNC_INT_EN;
+ }
+#endif
+ if (st->sensor[SENSOR_GYRO].on)
+ fifo_en |= BITS_GYRO_FIFO_EN;
+
+ if (st->sensor[SENSOR_ACCEL].on)
+ fifo_en |= BIT_ACCEL_FIFO_EN;
+ r = inv_plat_single_write(st, REG_FIFO_EN, fifo_en);
+ if (r)
+ return r;
+ st->int_en = int_en;
+ r = inv_plat_single_write(st, REG_INT_ENABLE, int_en);
+ if (r)
+ return r;
+ if (st->gesture_only_on && (!st->batch.timeout)) {
+ mode = BIT_ACCEL_INTEL_EN | BIT_ACCEL_INTEL_MODE;
+ } else {
+ mode = 0;
+ }
+ r = inv_plat_single_write(st, REG_ACCEL_INTEL_CTRL, mode);
+#ifdef SENSOR_DATA_FROM_REGISTERS
+ user = 0;
+#else
+ user = BIT_FIFO_EN;
+#endif
+ r = inv_plat_single_write(st, REG_USER_CTRL, user | st->i2c_dis);
+#ifdef TIMER_BASED_BATCHING
+	if (fifo_en && st->batch_timeout) {
+		if (st->is_batch_timer_running)
+			hrtimer_cancel(&st->hr_batch_timer);
+		st->is_batch_timer_running = true;
+		hrtimer_start(&st->hr_batch_timer,
+			ns_to_ktime(st->batch_timeout), HRTIMER_MODE_REL);
+	} else {
+		if (st->is_batch_timer_running)
+			hrtimer_cancel(&st->hr_batch_timer);
+		st->is_batch_timer_running = false;
+	}
+#endif
+
+ return r;
+}
+
+/*
+ * inv_reset_fifo() - Reset FIFO related registers.
+ */
+int inv_reset_fifo(struct inv_mpu_state *st, bool turn_off)
+{
+ int r, i;
+ struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+ int dur_ms;
+
+ r = inv_turn_on_fifo(st);
+ if (r)
+ return r;
+
+ ts_algo->last_run_time = get_time_ns();
+ ts_algo->reset_ts = ts_algo->last_run_time;
+ if (st->mode_1k_on)
+ ts_algo->first_sample = MODE_1K_INIT_SAMPLE;
+ else
+ ts_algo->first_sample = 1;
+
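+	/*
+	 * first_sample is the number of initial samples the timestamp
+	 * algorithm treats as start-up samples. dur_ms approximates the
+	 * sample period in ms (SMPLRT_DIV + 1, assuming the 1 kHz base
+	 * rate), and first_sample is raised so the start-up window covers
+	 * at least FIRST_SAMPLE_BUF_MS.
+	 */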
+ dur_ms = st->smplrt_div + 1;
+ if ((ts_algo->first_sample * dur_ms) < FIRST_SAMPLE_BUF_MS)
+ ts_algo->first_sample = FIRST_SAMPLE_BUF_MS / dur_ms;
+ if (ts_algo->first_sample == 0)
+ ts_algo->first_sample = 1;
+
+ st->last_temp_comp_time = ts_algo->last_run_time;
+ st->left_over_size = 0;
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ st->sensor[i].calib_flag = 0;
+ st->sensor[i].sample_calib = 0;
+ st->sensor[i].time_calib = ts_algo->last_run_time;
+ }
+
+ ts_algo->calib_counter = 0;
+
+ return 0;
+}
+
+static int inv_turn_on_engine(struct inv_mpu_state *st)
+{
+ u8 v, w;
+ int r;
+ unsigned int wait_ms;
+
+ if (st->chip_config.gyro_enable | st->chip_config.accel_enable) {
+ w = 0;
+ if (!st->chip_config.gyro_enable)
+ w |= BIT_PWR_GYRO_STBY;
+ if (!st->chip_config.accel_enable)
+ w |= BIT_PWR_ACCEL_STBY;
+ } else if (st->chip_config.compass_enable) {
+ w = BIT_PWR_GYRO_STBY;
+ } else {
+ w = (BIT_PWR_GYRO_STBY | BIT_PWR_ACCEL_STBY);
+ }
+
+ r = inv_plat_read(st, REG_PWR_MGMT_2, 1, &v);
+ if (r)
+ return r;
+ r = inv_plat_single_write(st, REG_PWR_MGMT_2, w);
+ if (r)
+ return r;
+
+ wait_ms = 0;
+ if (st->chip_config.gyro_enable
+ && (v & BIT_PWR_GYRO_STBY)) {
+ wait_ms = INV_IAM20680_GYRO_START_TIME;
+ }
+ if (st->chip_config.accel_enable
+ && (v & BIT_PWR_ACCEL_STBY)) {
+ if (INV_IAM20680_ACCEL_START_TIME > wait_ms)
+ wait_ms = INV_IAM20680_ACCEL_START_TIME;
+ }
+ if (wait_ms)
+ msleep(wait_ms);
+
+ if (st->chip_config.has_compass) {
+ if (st->chip_config.compass_enable)
+ r = st->slave_compass->resume(st);
+ else
+ r = st->slave_compass->suspend(st);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int inv_setup_dmp_rate(struct inv_mpu_state *st)
+{
+ int i;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on) {
+ st->cntl |= st->sensor[i].output;
+ st->sensor[i].dur =
+ st->eng_info[st->sensor[i].engine_base].dur;
+ st->sensor[i].div = 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * inv_set_lpf() - set low pass filter based on fifo rate.
+ */
+static int inv_set_lpf(struct inv_mpu_state *st, int rate)
+{
+ const short hz[] = {188, 98, 42, 20, 10, 5};
+ const int d[] = {INV_FILTER_188HZ, INV_FILTER_98HZ,
+ INV_FILTER_42HZ, INV_FILTER_20HZ,
+ INV_FILTER_10HZ, INV_FILTER_5HZ};
+ int i, h, data, result;
+
+#ifdef USE_GYRO_LN_MODE
+ if (1) {
+#else
+ if (st->chip_config.eis_enable || st->ois.en || st->mode_1k_on) {
+#endif
+ h = (rate >> 1);
+ i = 0;
+ while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
+ i++;
+ data = d[i];
+ data |= EXT_SYNC_SET;
+ result = inv_plat_single_write(st, REG_CONFIG, data);
+ if (result)
+ return result;
+
+ st->chip_config.lpf = data;
+ result = inv_plat_single_write(st, REG_LP_MODE_CTRL, 0);
+ } else {
+ result = inv_plat_single_write(st, REG_LP_MODE_CTRL,
+ BIT_GYRO_CYCLE_EN);
+ if (result)
+ return result;
+ data = 0;
+ result = inv_plat_single_write(st, REG_CONFIG, data | 3);
+ }
+
+ return result;
+}
+
+static int inv_set_div(struct inv_mpu_state *st, int a_d, int g_d)
+{
+ int result, div;
+
+ if (st->chip_config.gyro_enable)
+ div = g_d;
+ else
+ div = a_d;
+ if (st->chip_config.eis_enable)
+ div = 0;
+
+ st->smplrt_div = div;
+ pr_debug("div= %d\n", div);
+ result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV, div);
+
+ return result;
+}
+
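+/*
+ * With TIMER_BASED_BATCHING the requested batch timeout is capped so the
+ * data produced during one timer period still fits in MAX_BATCH_FIFO_SIZE;
+ * a timeout at or below one sample period disables the batch timer.
+ */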
+/* 20680 does not support batching */
+static int inv_set_batch(struct inv_mpu_state *st)
+{
+#ifdef TIMER_BASED_BATCHING
+	u64 timeout;
+	int required_fifo_size;
+
+	if (st->batch.timeout) {
+		required_fifo_size = st->batch.timeout *
+			st->eng_info[ENGINE_GYRO].running_rate *
+			st->batch.pk_size / 1000;
+		if (required_fifo_size > MAX_BATCH_FIFO_SIZE) {
+			required_fifo_size = MAX_BATCH_FIFO_SIZE;
+			timeout = (required_fifo_size / st->batch.pk_size) *
+				(1000 / st->eng_info[ENGINE_GYRO].running_rate);
+		} else {
+			timeout = st->batch.timeout;
+		}
+	} else {
+		timeout = 1000 / st->eng_info[ENGINE_GYRO].running_rate;
+	}
+	if (timeout <= 1000 / st->eng_info[ENGINE_GYRO].running_rate)
+		st->batch_timeout = 0;
+	else
+		st->batch_timeout = timeout * 1000000; /* ms to ns */
+#endif
+ st->batch.fifo_wm_th = 0;
+
+ return 0;
+}
+
+static int inv_set_rate(struct inv_mpu_state *st)
+{
+ int g_d, a_d, result, i;
+
+ result = inv_setup_dmp_rate(st);
+ if (result)
+ return result;
+
+ g_d = st->eng_info[ENGINE_GYRO].divider - 1;
+ a_d = st->eng_info[ENGINE_ACCEL].divider - 1;
+ result = inv_set_div(st, a_d, g_d);
+ if (result)
+ return result;
+ result = inv_set_lpf(st, st->eng_info[ENGINE_GYRO].running_rate);
+ if (result)
+ return result;
+	/* set ADLPF here so it does not change after accel is enabled */
+ result = inv_set_accel_config2(st, false);
+ st->batch.pk_size = 0;
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on)
+ st->batch.pk_size += st->sensor[i].sample_size;
+ }
+
+ inv_set_batch(st);
+
+ return result;
+}
+
+static int inv_determine_engine(struct inv_mpu_state *st)
+{
+ int i;
+ bool a_en, g_en;
+ int accel_rate, gyro_rate;
+
+ a_en = false;
+ g_en = false;
+ gyro_rate = MPU_INIT_SENSOR_RATE;
+ accel_rate = MPU_INIT_SENSOR_RATE;
+ /* loop the streaming sensors to see which engine needs to be turned on
+ */
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on) {
+ a_en |= st->sensor[i].a_en;
+ g_en |= st->sensor[i].g_en;
+ }
+ }
+
+ if (st->chip_config.eis_enable) {
+ g_en = true;
+ st->eis.frame_count = 0;
+ st->eis.fsync_delay = 0;
+ st->eis.gyro_counter = 0;
+ st->eis.voting_count = 0;
+ st->eis.voting_count_sub = 0;
+ gyro_rate = BASE_SAMPLE_RATE;
+ } else {
+ st->eis.eis_triggered = false;
+ st->eis.prev_state = false;
+ }
+
+ accel_rate = st->sensor[SENSOR_ACCEL].rate;
+ gyro_rate = max(gyro_rate, st->sensor[SENSOR_GYRO].rate);
+
+ st->ts_algo.clock_base = ENGINE_ACCEL;
+
+ if (g_en) {
+ /* gyro engine needs to be fastest */
+ if (a_en)
+ gyro_rate = max(gyro_rate, accel_rate);
+ accel_rate = gyro_rate;
+ st->ts_algo.clock_base = ENGINE_GYRO;
+ } else if (a_en) {
+ /* accel engine needs to be fastest if gyro engine is off */
+ gyro_rate = accel_rate;
+ st->ts_algo.clock_base = ENGINE_ACCEL;
+ }
+
+ st->eng_info[ENGINE_GYRO].running_rate = gyro_rate;
+ st->eng_info[ENGINE_ACCEL].running_rate = accel_rate;
+ if ((gyro_rate >= BASE_SAMPLE_RATE) ||
+ (accel_rate >= BASE_SAMPLE_RATE))
+ st->mode_1k_on = true;
+ else
+ st->mode_1k_on = false;
+ /* engine divider for pressure and compass is set later */
+ if (st->chip_config.eis_enable || st->mode_1k_on) {
+ st->eng_info[ENGINE_GYRO].divider = 1;
+ st->eng_info[ENGINE_ACCEL].divider = 1;
+		/* need to update rate and div for 1kHz mode */
+		for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+ if (st->sensor_l[i].on) {
+ st->sensor_l[i].counter = 0;
+ if (st->sensor_l[i].rate)
+ st->sensor_l[i].div =
+ BASE_SAMPLE_RATE
+ / st->sensor_l[i].rate;
+ else
+ st->sensor_l[i].div = 0xffff;
+ }
+ }
+ } else {
+ st->eng_info[ENGINE_GYRO].divider = BASE_SAMPLE_RATE /
+ st->eng_info[ENGINE_GYRO].running_rate;
+ st->eng_info[ENGINE_ACCEL].divider = BASE_SAMPLE_RATE /
+ st->eng_info[ENGINE_ACCEL].running_rate;
+ }
+
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++)
+ st->sensor_l[i].counter = 0;
+
+ inv_calc_engine_dur(&st->eng_info[ENGINE_GYRO]);
+ inv_calc_engine_dur(&st->eng_info[ENGINE_ACCEL]);
+
+ pr_debug("gen: %d aen: %d grate: %d arate: %d\n",
+ g_en, a_en, gyro_rate, accel_rate);
+
+ st->chip_config.gyro_enable = g_en;
+ st->chip_config.accel_enable = a_en;
+
+ return 0;
+}
+
+/*
+ * set_inv_enable() - enable function.
+ */
+int set_inv_enable(struct iio_dev *indio_dev)
+{
+ int result;
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+ result = inv_switch_power_in_lp(st, true);
+ if (result)
+ return result;
+ inv_stop_interrupt(st);
+ inv_determine_engine(st);
+ result = inv_set_rate(st);
+ if (result) {
+ pr_err("inv_set_rate error\n");
+ return result;
+ }
+ result = inv_turn_on_engine(st);
+ if (result) {
+ pr_err("inv_turn_on_engine error\n");
+ return result;
+ }
+ result = inv_reset_fifo(st, false);
+ if (result)
+ return result;
+ result = inv_switch_power_in_lp(st, false);
+ if ((!st->chip_config.gyro_enable) &&
+ (!st->chip_config.accel_enable)) {
+ inv_set_power(st, false);
+ return 0;
+ }
+
+ return result;
+}
+/* dummy function for 20608D */
+int inv_enable_pedometer_interrupt(struct inv_mpu_state *st, bool en)
+{
+ return 0;
+}
+int inv_dmp_read(struct inv_mpu_state *st, int off, int size, u8 *buf)
+{
+ return 0;
+}
+int inv_firmware_load(struct inv_mpu_state *st)
+{
+ return 0;
+}
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_common.c b/drivers/iio/imu/inv_mpu/inv_mpu_common.c
new file mode 100644
index 000000000000..33db03418b92
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_common.c
@@ -0,0 +1,988 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+#include "inv_mpu_iio.h"
+#ifdef CONFIG_RTC_INTF_ALARM
+#include <linux/android_alarm.h>
+#endif
+#include <linux/export.h>
+
+#ifdef CONFIG_RTC_INTF_ALARM
+s64 get_time_ns(void)
+{
+ struct timespec ts;
+
+ /* get_monotonic_boottime(&ts); */
+
+ /* Workaround for some platform on which monotonic clock and
+ * Android SystemClock has a gap.
+ * Use ktime_to_timespec(alarm_get_elapsed_realtime()) instead of
+ * get_monotonic_boottime() for these platform
+ */
+
+ ts = ktime_to_timespec(alarm_get_elapsed_realtime());
+
+ return timespec_to_ns(&ts);
+}
+#else
+s64 get_time_ns(void)
+{
+ struct timespec ts;
+
+ get_monotonic_boottime(&ts);
+
+ /* Workaround for some platform on which monotonic clock and
+ * Android SystemClock has a gap.
+ * Use ktime_to_timespec(alarm_get_elapsed_realtime()) instead of
+ * get_monotonic_boottime() for these platform
+ */
+ return timespec_to_ns(&ts);
+}
+
+#endif
+
+#ifdef ACCEL_BIAS_TEST
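+/*
+ * inv_get_3axis_average() - accumulate samples in a BUFFER_SIZE-deep ring
+ * buffer and, once it has filled at least once, return 1 with the running
+ * average of the last BUFFER_SIZE samples in dst[].
+ */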
+int inv_get_3axis_average(s16 src[], s16 dst[], s16 reset)
+{
+#define BUFFER_SIZE 200
+	static s16 buffer[BUFFER_SIZE][3];
+	static s16 current_position;
+	static s16 ready;
+	int sum[3] = {0, 0, 0};
+	int i;
+
+	if (reset) {
+		current_position = 0;
+		ready = 0;
+	}
+	buffer[current_position][0] = src[0];
+	buffer[current_position][1] = src[1];
+	buffer[current_position][2] = src[2];
+	current_position++;
+	if (current_position == BUFFER_SIZE) {
+		ready = 1;
+		current_position = 0;
+	}
+	if (ready) {
+		for (i = 0; i < BUFFER_SIZE; i++) {
+			sum[0] += buffer[i][0];
+			sum[1] += buffer[i][1];
+			sum[2] += buffer[i][2];
+		}
+		dst[0] = sum[0] / BUFFER_SIZE;
+		dst[1] = sum[1] / BUFFER_SIZE;
+		dst[2] = sum[2] / BUFFER_SIZE;
+		return 1;
+	}
+	return 0;
+}
+#endif
+
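+/* fixed point multiply of two Q30 values: (a * b) >> 30 */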
+int inv_q30_mult(int a, int b)
+{
+#define DMP_MULTI_SHIFT 30
+ u64 temp;
+ int result;
+
+ temp = ((u64)a) * b;
+ result = (int)(temp >> DMP_MULTI_SHIFT);
+
+ return result;
+}
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || \
+ defined(CONFIG_INV_MPU_IIO_ICM20690)
+/* inv_read_secondary(): set secondary registers for reading.
+ The chip must be set as bank 3 before calling.
+ */
+int inv_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int len)
+{
+ int result;
+
+ result = inv_plat_single_write(st, st->slv_reg[ind].addr,
+ INV_MPU_BIT_I2C_READ | addr);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, st->slv_reg[ind].reg, reg);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, st->slv_reg[ind].ctrl,
+ INV_MPU_BIT_SLV_EN | len);
+
+ return result;
+}
+
+int inv_execute_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int len, u8 *d)
+{
+ int result;
+
+ inv_set_bank(st, BANK_SEL_3);
+ result = inv_read_secondary(st, ind, addr, reg, len);
+ if (result)
+ return result;
+ inv_set_bank(st, BANK_SEL_0);
+ result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis |
+ BIT_I2C_MST_EN);
+ msleep(SECONDARY_INIT_WAIT);
+ result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis);
+ if (result)
+ return result;
+ result = inv_plat_read(st, REG_EXT_SLV_SENS_DATA_00, len, d);
+
+ return result;
+}
+
+/* inv_write_secondary(): set secondary registers for writing.
+ The chip must be set as bank 3 before calling.
+ */
+int inv_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int v)
+{
+ int result;
+
+ result = inv_plat_single_write(st, st->slv_reg[ind].addr, addr);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, st->slv_reg[ind].reg, reg);
+ if (result)
+ return result;
+ result = inv_plat_single_write(st, st->slv_reg[ind].ctrl,
+ INV_MPU_BIT_SLV_EN | 1);
+
+ result = inv_plat_single_write(st, st->slv_reg[ind].d0, v);
+
+ return result;
+}
+
+int inv_execute_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int v)
+{
+ int result;
+
+ inv_set_bank(st, BANK_SEL_3);
+ result = inv_write_secondary(st, ind, addr, reg, v);
+ if (result)
+ return result;
+ inv_set_bank(st, BANK_SEL_0);
+ result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis |
+ BIT_I2C_MST_EN);
+ msleep(SECONDARY_INIT_WAIT);
+ result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis);
+
+ return result;
+}
+
+int inv_set_bank(struct inv_mpu_state *st, u8 bank)
+{
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+ int r;
+
+ r = inv_plat_single_write(st, REG_BANK_SEL, bank);
+
+ return r;
+#else
+ return 0;
+#endif
+}
+#endif
+
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+/**
+ * inv_write_cntl() - Write control word to designated address.
+ * @st: Device driver instance.
+ * @wd: control word.
+ * @en: enable/disable.
+ * @cntl: control address to be written.
+ */
+int inv_write_cntl(struct inv_mpu_state *st, u16 wd, bool en, int cntl)
+{
+ int result;
+ u8 reg[2], d_out[2];
+
+ result = mem_r(cntl, 2, d_out);
+ if (result)
+ return result;
+ reg[0] = ((wd >> 8) & 0xff);
+ reg[1] = (wd & 0xff);
+ if (!en) {
+ d_out[0] &= ~reg[0];
+ d_out[1] &= ~reg[1];
+ } else {
+ d_out[0] |= reg[0];
+ d_out[1] |= reg[1];
+ }
+ result = mem_w(cntl, 2, d_out);
+
+ return result;
+}
+#endif
+
+int inv_set_power(struct inv_mpu_state *st, bool power_on)
+{
+ u8 d;
+ int r;
+
+ if ((!power_on) == st->chip_config.is_asleep)
+ return 0;
+
+ d = BIT_CLK_PLL;
+ if (!power_on)
+ d |= BIT_SLEEP;
+
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, d);
+ if (r)
+ return r;
+
+ if (power_on)
+ usleep_range(REG_UP_TIME_USEC, REG_UP_TIME_USEC);
+
+ st->chip_config.is_asleep = !power_on;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(inv_set_power);
+
+int inv_stop_interrupt(struct inv_mpu_state *st)
+{
+ int res;
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+ /* disable_irq_wake alone should work already. However,
+ it might need system configuration change. From driver side,
+ we will disable IRQ altogether for non-wakeup sensors. */
+ res = inv_plat_read(st, REG_INT_ENABLE, 1, &st->int_en);
+ if (res)
+ return res;
+ res = inv_plat_read(st, REG_INT_ENABLE_2, 1, &st->int_en_2);
+ if (res)
+ return res;
+ res = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+ if (res)
+ return res;
+ res = inv_plat_single_write(st, REG_INT_ENABLE_2, 0);
+ if (res)
+ return res;
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20608D)
+ res = inv_plat_read(st, REG_INT_ENABLE, 1, &st->int_en);
+ if (res)
+ return res;
+ res = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+ if (res)
+ return res;
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+ || defined(CONFIG_INV_MPU_IIO_ICM20690) \
+ || defined(CONFIG_INV_MPU_IIO_IAM20680)
+ res = inv_plat_read(st, REG_INT_ENABLE, 1, &st->int_en);
+ if (res)
+ return res;
+ res = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+ if (res)
+ return res;
+#endif
+ return 0;
+}
+int inv_reenable_interrupt(struct inv_mpu_state *st)
+{
+ int res = 0;
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+ res = inv_plat_single_write(st, REG_INT_ENABLE, st->int_en);
+ if (res)
+ return res;
+ res = inv_plat_single_write(st, REG_INT_ENABLE_2, st->int_en_2);
+ if (res)
+ return res;
+#elif defined(CONFIG_INV_MPU_IIO_ICM20608D)
+ res = inv_plat_single_write(st, REG_INT_ENABLE, st->int_en);
+ if (res)
+ return res;
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+ || defined(CONFIG_INV_MPU_IIO_ICM20690) \
+ || defined(CONFIG_INV_MPU_IIO_IAM20680)
+ res = inv_plat_single_write(st, REG_INT_ENABLE, st->int_en);
+ if (res)
+ return res;
+#endif
+ return res;
+}
+
+static int inv_lp_en_off_mode(struct inv_mpu_state *st, bool on)
+{
+ int r;
+
+ if (!st->chip_config.is_asleep)
+ return 0;
+
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_CLK_PLL);
+ st->chip_config.is_asleep = 0;
+
+ return r;
+}
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+static int inv_lp_en_on_mode(struct inv_mpu_state *st, bool on)
+{
+ int r = 0;
+ u8 w;
+
+ if ((!st->chip_config.is_asleep) &&
+ ((!on) == st->chip_config.lp_en_set))
+ return 0;
+
+ w = BIT_CLK_PLL;
+ if ((!on) && (!st->eis.eis_triggered))
+ w |= BIT_LP_EN;
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+ st->chip_config.is_asleep = 0;
+ st->chip_config.lp_en_set = (!on);
+ return r;
+}
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+ || defined(CONFIG_INV_MPU_IIO_ICM20690) \
+ || defined(CONFIG_INV_MPU_IIO_IAM20680)
+int inv_set_accel_config2(struct inv_mpu_state *st, bool cycle_mode)
+{
+ int cycle_freq[] = {275, 192, 111, 59};
+ int cont_freq[] = {219, 219, 99, 45, 22, 11, 6};
+ int i, r, rate;
+ u8 v;
+
+ v = 0;
+#ifdef CONFIG_INV_MPU_IIO_ICM20690
+ v |= BIT_FIFO_SIZE_1K;
+#endif
+ if (cycle_mode) {
+ rate = (st->eng_info[ENGINE_ACCEL].running_rate << 1);
+ i = ARRAY_SIZE(cycle_freq) - 1;
+ while (i > 0) {
+ if (rate < cycle_freq[i]) {
+ break;
+ }
+ i--;
+ }
+ r = inv_plat_single_write(st, REG_ACCEL_CONFIG_2, v |
+ (i << 4) | 7);
+ if (r)
+ return r;
+ } else {
+ rate = (st->eng_info[ENGINE_ACCEL].running_rate >> 1);
+ for (i = 1; i < ARRAY_SIZE(cont_freq); i++) {
+ if (rate >= cont_freq[i])
+ break;
+ }
+ if (i > 6)
+ i = 6;
+ r = inv_plat_single_write(st, REG_ACCEL_CONFIG_2, v | i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+static int inv_lp_en_on_mode(struct inv_mpu_state *st, bool on)
+{
+ int r = 0;
+ u8 w;
+ bool cond_check;
+
+ if ((!st->chip_config.is_asleep) &&
+ ((!on) == st->chip_config.lp_en_set))
+ return 0;
+ cond_check = (!on) && st->cycle_on;
+
+ w = BIT_CLK_PLL;
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+ if (cond_check) {
+ w |= BIT_LP_EN;
+ inv_set_accel_config2(st, true);
+ st->chip_config.lp_en_set = true;
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+ } else {
+ inv_set_accel_config2(st, false);
+#ifdef CONFIG_INV_MPU_IIO_ICM20690
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, w | BIT_SLEEP);
+ if (r)
+ return r;
+#endif
+ st->chip_config.lp_en_set = false;
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+ msleep(10);
+ }
+ st->chip_config.is_asleep = 0;
+
+ return r;
+}
+#endif
+#ifdef CONFIG_INV_MPU_IIO_ICM20608D
+static int inv_set_accel_config2(struct inv_mpu_state *st)
+{
+ int cont_freq[] = {219, 219, 99, 45, 22, 11, 6};
+ int dec2_cfg = 0;
+ int i, r, rate;
+
+ rate = (st->eng_info[ENGINE_ACCEL].running_rate << 1);
+ i = 0;
+	if (!st->chip_config.eis_enable) {
+		while ((rate < cont_freq[i]) && (i < ARRAY_SIZE(cont_freq) - 1))
+			i++;
+		dec2_cfg = 2 << 4; /* 4x */
+ }
+ r = inv_plat_single_write(st, REG_ACCEL_CONFIG_2, i | dec2_cfg);
+ if (r)
+ return r;
+ return 0;
+}
+static int inv_lp_en_on_mode(struct inv_mpu_state *st, bool on)
+{
+ int r = 0;
+ u8 w;
+
+ w = BIT_CLK_PLL;
+ if ((!on) && (!st->chip_config.eis_enable))
+ w |= BIT_LP_EN;
+ inv_set_accel_config2(st);
+ r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+
+ return r;
+}
+#endif
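+/*
+ * inv_switch_power_in_lp() - bring the chip out of sleep; depending on
+ * lp_en_mode_off this either just selects the PLL clock or also manages
+ * the LP_EN low-power setting and the accel LPF configuration.
+ */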
+int inv_switch_power_in_lp(struct inv_mpu_state *st, bool on)
+{
+ int r;
+
+ if (st->chip_config.lp_en_mode_off)
+ r = inv_lp_en_off_mode(st, on);
+ else
+ r = inv_lp_en_on_mode(st, on);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(inv_switch_power_in_lp);
+
+int write_be16_to_mem(struct inv_mpu_state *st, u16 data, int addr)
+{
+ u8 d[2];
+
+ d[0] = (data >> 8) & 0xff;
+ d[1] = data & 0xff;
+
+ return mem_w(addr, sizeof(d), d);
+}
+
+int write_be32_to_mem(struct inv_mpu_state *st, u32 data, int addr)
+{
+ cpu_to_be32s(&data);
+ return mem_w(addr, sizeof(data), (u8 *)&data);
+}
+
+int read_be16_from_mem(struct inv_mpu_state *st, u16 *o, int addr)
+{
+ int result;
+ u8 d[2];
+
+ result = mem_r(addr, 2, (u8 *) &d);
+ *o = d[0] << 8 | d[1];
+
+ return result;
+}
+
+int read_be32_from_mem(struct inv_mpu_state *st, u32 *o, int addr)
+{
+ int result;
+ u32 d = 0;
+
+ result = mem_r(addr, 4, (u8 *) &d);
+ *o = be32_to_cpup((__be32 *)(&d));
+
+ return result;
+}
+
+int be32_to_int(u8 *d)
+{
+ return (d[0] << 24) | (d[1] << 16) | (d[2] << 8) | d[3];
+}
+
+u32 inv_get_cntr_diff(u32 curr_counter, u32 prev)
+{
+ u32 diff;
+
+ if (curr_counter > prev)
+ diff = curr_counter - prev;
+ else
+ diff = 0xffffffff - prev + curr_counter + 1;
+
+ return diff;
+}
+
+int inv_write_2bytes(struct inv_mpu_state *st, int addr, int data)
+{
+ u8 d[2];
+
+ if (data < 0 || data > USHRT_MAX)
+ return -EINVAL;
+
+ d[0] = (u8) ((data >> 8) & 0xff);
+ d[1] = (u8) (data & 0xff);
+
+ return mem_w(addr, ARRAY_SIZE(d), d);
+}
+
+
+
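+/*
+ * inv_process_eis() - voting state machine for EIS/FSYNC alignment. This
+ * appears to search for a stable pair of (gyro samples per frame, FSYNC
+ * delay) values: states 0-3 vote on the gyro counter observed between
+ * FSYNC events, states 4-8 then vote on the corresponding delay values,
+ * and a candidate is accepted after more than five consistent observations.
+ */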
+int inv_process_eis(struct inv_mpu_state *st, u16 delay)
+{
+ int tmp1, tmp2, tmp3;
+
+ switch (st->eis.voting_state) {
+ case 0:
+ st->eis.gyro_counter_s[0] = st->eis.gyro_counter;
+ st->eis.fsync_delay_s[0] = delay - st->eis.fsync_delay;
+ st->eis.voting_count = 1;
+ st->eis.voting_count_sub = 0;
+ st->eis.voting_state = 1;
+ break;
+ case 1:
+ if (abs(st->eis.gyro_counter_s[0] -
+ st->eis.gyro_counter) <= 1) {
+ st->eis.voting_count++;
+ } else {
+ st->eis.gyro_counter_s[2] = st->eis.gyro_counter;
+ st->eis.voting_count_sub++;
+ st->eis.voting_state = 2;
+ }
+ if (st->eis.voting_count > 5)
+ st->eis.voting_state = 3;
+ break;
+ case 2:
+ tmp1 = abs(st->eis.gyro_counter_s[0] - st->eis.gyro_counter);
+ tmp2 = abs(st->eis.gyro_counter_s[2] - st->eis.gyro_counter);
+
+ if ((tmp1 < tmp2) && (tmp1 <= 1))
+ st->eis.voting_count++;
+ else
+ st->eis.voting_count_sub++;
+ if (st->eis.voting_count > 5) {
+ st->eis.voting_state = 3;
+ st->eis.voting_count = 0;
+ st->eis.voting_count_sub = 0;
+ }
+
+ if (st->eis.voting_count_sub > 5) {
+ st->eis.gyro_counter_s[0] = st->eis.gyro_counter;
+ st->eis.fsync_delay_s[0] = delay - st->eis.fsync_delay;
+ st->eis.voting_state = 1;
+ st->eis.voting_count = 1;
+ st->eis.voting_count_sub = 0;
+ }
+ break;
+ case 3:
+ tmp1 = abs(st->eis.gyro_counter_s[0] - st->eis.gyro_counter);
+ if (tmp1 == 1) {
+ st->eis.gyro_counter_s[1] = st->eis.gyro_counter;
+ st->eis.fsync_delay_s[1] = delay - st->eis.fsync_delay;
+ st->eis.voting_state = 4;
+ st->eis.voting_count_sub = 1;
+ st->eis.voting_count = 1;
+ }
+ break;
+ case 4:
+ if (st->eis.gyro_counter == st->eis.gyro_counter_s[0]) {
+ tmp1 = delay - st->eis.fsync_delay;
+ tmp2 = abs(tmp1 - st->eis.fsync_delay_s[0]);
+ if (tmp2 < 3) {
+ st->eis.voting_count++;
+ } else {
+ st->eis.fsync_delay_s[2] = tmp1;
+ st->eis.voting_count_sub = 1;
+ st->eis.voting_state = 5;
+ }
+ if (st->eis.voting_count > 5) {
+ st->eis.voting_count = 1;
+ st->eis.voting_state = 6;
+ }
+ }
+ break;
+ case 5:
+ if (st->eis.gyro_counter == st->eis.gyro_counter_s[0]) {
+ tmp1 = delay - st->eis.fsync_delay;
+
+ tmp2 = abs(tmp1 - st->eis.fsync_delay_s[0]);
+ tmp3 = abs(tmp1 - st->eis.fsync_delay_s[2]);
+ if ((tmp2 < tmp3) && (tmp2 < 3))
+ st->eis.voting_count++;
+ else
+ st->eis.voting_count_sub++;
+ if ((st->eis.voting_count > 5) &&
+ (st->eis.voting_count_sub
+ < st->eis.voting_count)) {
+ st->eis.voting_state = 6;
+ st->eis.voting_count = 1;
+ } else if (st->eis.voting_count_sub > 5) {
+ st->eis.fsync_delay_s[0] = tmp1;
+ st->eis.voting_state = 4;
+ st->eis.voting_count = 1;
+ }
+
+ }
+ break;
+ case 6:
+ if (st->eis.gyro_counter == st->eis.gyro_counter_s[1]) {
+ tmp1 = delay - st->eis.fsync_delay;
+ tmp2 = abs(tmp1 - st->eis.fsync_delay_s[1]);
+ if (tmp2 < 3) {
+ st->eis.voting_count++;
+ } else {
+ st->eis.fsync_delay_s[2] = tmp1;
+ st->eis.voting_count_sub = 1;
+ st->eis.voting_count = 1;
+ st->eis.voting_state = 7;
+ }
+ if (st->eis.voting_count > 5)
+ st->eis.voting_state = 8;
+ }
+ break;
+ case 7:
+ if (st->eis.gyro_counter == st->eis.gyro_counter_s[1]) {
+ tmp1 = delay - st->eis.fsync_delay;
+
+ tmp2 = abs(tmp1 - st->eis.fsync_delay_s[1]);
+ tmp3 = abs(tmp1 - st->eis.fsync_delay_s[2]);
+ if ((tmp2 < tmp3) && (tmp2 < 3))
+ st->eis.voting_count++;
+ else
+ st->eis.voting_count_sub++;
+ if ((st->eis.voting_count > 5) &&
+ (st->eis.voting_count_sub
+ < st->eis.voting_count)) {
+ st->eis.voting_state = 8;
+ } else if (st->eis.voting_count_sub > 5) {
+ st->eis.fsync_delay_s[1] = tmp1;
+ st->eis.voting_state = 6;
+ st->eis.voting_count = 1;
+ }
+
+ }
+ break;
+ default:
+ break;
+ }
+
+ pr_debug("de= %d gc= %d\n", delay, st->eis.gyro_counter);
+ st->eis.fsync_delay = delay;
+ st->eis.gyro_counter = 0;
+
+ pr_debug("state=%d g1= %d d1= %d g2= %d d2= %d\n",
+ st->eis.voting_state,
+ st->eis.gyro_counter_s[0],
+ st->eis.fsync_delay_s[0],
+ st->eis.gyro_counter_s[1],
+ st->eis.fsync_delay_s[1]);
+
+ return 0;
+}
+
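+/*
+ * inv_rate_convert() - round a requested rate to one the hardware can
+ * produce with an integer divider of the base frequency, choosing between
+ * the two neighbouring achievable rates using INV_ODR_BUFFER_MULTI as the
+ * tolerance.
+ */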
+int inv_rate_convert(struct inv_mpu_state *st, int ind, int data)
+{
+ int t, out, out1, out2;
+ int base_freq;
+
+ if (data <= MPU_DEFAULT_DMP_FREQ)
+ base_freq = MPU_DEFAULT_DMP_FREQ;
+ else
+ base_freq = BASE_SAMPLE_RATE;
+
+ t = base_freq / data;
+ if (!t)
+ t = 1;
+ out1 = base_freq / (t + 1);
+ out2 = base_freq / t;
+ if ((data - out1) * INV_ODR_BUFFER_MULTI < data)
+ out = out1;
+ else
+ out = out2;
+
+ return out;
+}
+
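+/*
+ * Keep the wake and non-wake variants of the same sensor consistent: when
+ * either is active they are both set to the higher of the two requested
+ * rates, and inv_check_wake_non_wake_divider() below suppresses the
+ * non-wake copy (divider 0xffff) when both are on.
+ */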
+static void inv_check_wake_non_wake(struct inv_mpu_state *st,
+ enum SENSOR_L wake, enum SENSOR_L non_wake)
+{
+ int tmp_rate;
+
+ if (!st->sensor_l[wake].on && !st->sensor_l[non_wake].on)
+ return;
+
+ tmp_rate = MPU_INIT_SENSOR_RATE;
+ if (st->sensor_l[wake].on)
+ tmp_rate = st->sensor_l[wake].rate;
+ if (st->sensor_l[non_wake].on)
+ tmp_rate = max(tmp_rate, st->sensor_l[non_wake].rate);
+ st->sensor_l[wake].rate = tmp_rate;
+ st->sensor_l[non_wake].rate = tmp_rate;
+}
+
+static void inv_check_wake_non_wake_divider(struct inv_mpu_state *st,
+ enum SENSOR_L wake, enum SENSOR_L non_wake)
+{
+ if (st->sensor_l[wake].on && st->sensor_l[non_wake].on)
+ st->sensor_l[non_wake].div = 0xffff;
+
+}
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+ || defined(CONFIG_INV_MPU_IIO_ICM20690) \
+ || defined(CONFIG_INV_MPU_IIO_IAM20680)
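+/*
+ * inv_check_sensor_on() - map the logical sensors (sensor_l[]) onto the
+ * physical sensors, resolve the common physical rate, compute per-sensor
+ * dividers, and decide whether gesture-only (WOM) mode and wake-on
+ * behaviour are needed.
+ */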
+int inv_check_sensor_on(struct inv_mpu_state *st)
+{
+ int i, max_rate;
+ enum SENSOR_L wake[] = {SENSOR_L_GYRO_WAKE, SENSOR_L_ACCEL_WAKE,
+ SENSOR_L_MAG_WAKE};
+ enum SENSOR_L non_wake[] = {SENSOR_L_GYRO, SENSOR_L_ACCEL,
+ SENSOR_L_MAG};
+
+ st->sensor_l[SENSOR_L_GESTURE_ACCEL].rate = GESTURE_ACCEL_RATE;
+ for (i = 0; i < SENSOR_NUM_MAX; i++)
+ st->sensor[i].on = false;
+ for (i = 0; i < SENSOR_NUM_MAX; i++)
+ st->sensor[i].rate = MPU_INIT_SENSOR_RATE;
+
+ if ((st->step_detector_l_on
+ || st->step_detector_wake_l_on
+ || st->step_counter_l_on
+ || st->step_counter_wake_l_on
+ || st->chip_config.pick_up_enable
+ || st->chip_config.tilt_enable)
+ && (!st->sensor_l[SENSOR_L_ACCEL].on)
+ && (!st->sensor_l[SENSOR_L_ACCEL_WAKE].on))
+ st->sensor_l[SENSOR_L_GESTURE_ACCEL].on = true;
+ else
+ st->sensor_l[SENSOR_L_GESTURE_ACCEL].on = false;
+
+
+ st->chip_config.wake_on = false;
+ for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+ if (st->sensor_l[i].on && st->sensor_l[i].rate) {
+ st->sensor[st->sensor_l[i].base].on = true;
+ st->chip_config.wake_on |= st->sensor_l[i].wake_on;
+ }
+ }
+ if (st->sensor_l[SENSOR_L_GESTURE_ACCEL].on &&
+ (!st->sensor[SENSOR_GYRO].on) &&
+ (!st->sensor[SENSOR_COMPASS].on))
+ st->gesture_only_on = true;
+ else
+ st->gesture_only_on = false;
+
+ for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+ if (st->sensor_l[i].on) {
+ st->sensor[st->sensor_l[i].base].rate =
+ max(st->sensor[st->sensor_l[i].base].rate,
+ st->sensor_l[i].rate);
+ }
+ }
+ max_rate = MPU_INIT_SENSOR_RATE;
+ if (st->chip_config.eis_enable) {
+ max_rate = ESI_GYRO_RATE;
+ st->sensor_l[SENSOR_L_EIS_GYRO].rate = ESI_GYRO_RATE;
+ }
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on) {
+ max_rate = max(max_rate, st->sensor[i].rate);
+ }
+ }
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on) {
+ st->sensor[i].rate = max_rate;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(wake); i++)
+ inv_check_wake_non_wake(st, wake[i], non_wake[i]);
+
+ for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+ if (st->sensor_l[i].on) {
+ if (st->sensor_l[i].rate)
+ st->sensor_l[i].div =
+ st->sensor[st->sensor_l[i].base].rate
+ / st->sensor_l[i].rate;
+ else
+ st->sensor_l[i].div = 0xffff;
+ pr_debug("sensor= %d, div= %d\n",
+ i, st->sensor_l[i].div);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(wake); i++)
+ inv_check_wake_non_wake_divider(st, wake[i], non_wake[i]);
+
+ if (st->step_detector_wake_l_on ||
+ st->step_counter_wake_l_on ||
+ st->chip_config.pick_up_enable ||
+ st->chip_config.tilt_enable)
+ st->chip_config.wake_on = true;
+
+ return 0;
+}
+#else
+static void inv_do_check_sensor_on(struct inv_mpu_state *st,
+ enum SENSOR_L *wake,
+ enum SENSOR_L *non_wake, int sensor_size)
+{
+ int i;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++)
+ st->sensor[i].on = false;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++)
+ st->sensor[i].rate = MPU_INIT_SENSOR_RATE;
+
+ st->chip_config.wake_on = false;
+ for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+ if (st->sensor_l[i].on && st->sensor_l[i].rate) {
+ st->sensor[st->sensor_l[i].base].on = true;
+ st->chip_config.wake_on |= st->sensor_l[i].wake_on;
+ }
+ }
+
+ for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+ if (st->sensor_l[i].on) {
+ st->sensor[st->sensor_l[i].base].rate =
+ max(st->sensor[st->sensor_l[i].base].rate,
+ st->sensor_l[i].rate);
+ }
+ }
+ for (i = 0; i < sensor_size; i++)
+ inv_check_wake_non_wake(st, wake[i], non_wake[i]);
+
+ for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+ if (st->sensor_l[i].on) {
+ if (st->sensor_l[i].rate)
+ st->sensor_l[i].div =
+ st->sensor[st->sensor_l[i].base].rate
+ / st->sensor_l[i].rate;
+ else
+ st->sensor_l[i].div = 0xffff;
+ }
+ }
+ for (i = 0; i < sensor_size; i++)
+ inv_check_wake_non_wake_divider(st, wake[i], non_wake[i]);
+
+ if (st->step_detector_wake_l_on ||
+ st->step_counter_wake_l_on ||
+ st->chip_config.pick_up_enable ||
+ st->chip_config.tilt_enable ||
+ st->smd.on)
+ st->chip_config.wake_on = true;
+
+}
+#endif
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20608D)
+int inv_check_sensor_on(struct inv_mpu_state *st)
+{
+ enum SENSOR_L wake[] = {SENSOR_L_GYRO_WAKE, SENSOR_L_ACCEL_WAKE,
+ SENSOR_L_SIXQ_WAKE, SENSOR_L_PEDQ_WAKE,
+ SENSOR_L_GYRO_CAL_WAKE};
+ enum SENSOR_L non_wake[] = {SENSOR_L_GYRO, SENSOR_L_ACCEL,
+ SENSOR_L_SIXQ, SENSOR_L_PEDQ,
+ SENSOR_L_GYRO_CAL};
+
+ inv_do_check_sensor_on(st, wake, non_wake, ARRAY_SIZE(wake));
+
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+int inv_check_sensor_on(struct inv_mpu_state *st)
+{
+ enum SENSOR_L wake[] = {SENSOR_L_GYRO_WAKE, SENSOR_L_ACCEL_WAKE,
+ SENSOR_L_MAG_WAKE, SENSOR_L_ALS_WAKE,
+ SENSOR_L_SIXQ_WAKE, SENSOR_L_PEDQ_WAKE,
+ SENSOR_L_NINEQ_WAKE, SENSOR_L_GEOMAG_WAKE,
+ SENSOR_L_PRESSURE_WAKE,
+ SENSOR_L_GYRO_CAL_WAKE,
+ SENSOR_L_MAG_CAL_WAKE};
+ enum SENSOR_L non_wake[] = {SENSOR_L_GYRO, SENSOR_L_ACCEL,
+ SENSOR_L_MAG, SENSOR_L_ALS,
+ SENSOR_L_SIXQ, SENSOR_L_PEDQ,
+ SENSOR_L_NINEQ, SENSOR_L_GEOMAG,
+ SENSOR_L_PRESSURE,
+ SENSOR_L_GYRO_CAL,
+ SENSOR_L_MAG_CAL};
+
+ inv_do_check_sensor_on(st, wake, non_wake, ARRAY_SIZE(wake));
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+int inv_mpu_suspend(struct iio_dev *indio_dev)
+{
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+ /* add code according to different request Start */
+ dev_info(st->dev, "%s suspend\n", st->hw->name);
+ mutex_lock(&indio_dev->mlock);
+
+ st->resume_state = false;
+ if (st->chip_config.wake_on) {
+ enable_irq_wake(st->irq);
+ } else {
+ inv_stop_interrupt(st);
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(inv_mpu_suspend);
+
+/*
+ * inv_mpu_complete(): complete method for this driver.
+ * This method can be modified according to the request of different
+ * customers. It basically undo everything suspend is doing
+ * and recover the chip to what it was before suspend. We use complete to
+ * make sure that alarm clock resume is finished. If we use resume, the
+ * alarm clock may not resume yet and get incorrect clock reading.
+ */
+void inv_mpu_complete(struct iio_dev *indio_dev)
+{
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+ dev_info(st->dev, "%s resume\n", st->hw->name);
+ if (st->resume_state)
+ return;
+
+ mutex_lock(&indio_dev->mlock);
+
+ if (!st->chip_config.wake_on) {
+ inv_reenable_interrupt(st);
+ } else {
+ disable_irq_wake(st->irq);
+ }
+ /* resume state is used to synchronize read_fifo such that it won't
+ proceed unless resume is finished. */
+ st->resume_state = true;
+ /* resume flag is indicating that current clock reading is from resume,
+ it has up to 1 second drift and should do proper processing */
+ st->ts_algo.resume_flag = true;
+ mutex_unlock(&indio_dev->mlock);
+ wake_up_interruptible(&st->wait_queue);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(inv_mpu_complete);
+#endif
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_dts.c b/drivers/iio/imu/inv_mpu/inv_mpu_dts.c
new file mode 100644
index 000000000000..0b8b3fc29b0a
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_dts.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/export.h>
+
+#include <linux/iio/imu/mpu.h>
+#include "inv_mpu_dts.h"
+#include "inv_mpu_iio.h"
+
+#ifdef CONFIG_OF
+
+static int inv_mpu_power_on(struct mpu_platform_data *pdata)
+{
+ int err;
+
+ if (!IS_ERR(pdata->vdd_ana)) {
+ err = regulator_enable(pdata->vdd_ana);
+ if (err)
+ return err;
+ }
+ if (!IS_ERR(pdata->vdd_i2c)) {
+ err = regulator_enable(pdata->vdd_i2c);
+ if (err)
+ goto error_disable_vdd_ana;
+ }
+
+ return 0;
+
+error_disable_vdd_ana:
+ regulator_disable(pdata->vdd_ana);
+ return err;
+}
+
+static int inv_mpu_power_off(struct mpu_platform_data *pdata)
+{
+ if (!IS_ERR(pdata->vdd_ana))
+ regulator_disable(pdata->vdd_ana);
+ if (!IS_ERR(pdata->vdd_i2c))
+ regulator_disable(pdata->vdd_i2c);
+
+ return 0;
+}
+
+static int inv_parse_orientation_matrix(struct device *dev, s8 *orient)
+{
+ int rc, i;
+ struct device_node *np = dev->of_node;
+ u32 temp_val, temp_val2;
+
+ for (i = 0; i < 9; i++)
+ orient[i] = 0;
+
+ /* parsing axis x orientation matrix */
+ rc = of_property_read_u32(np, "axis_map_x", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read axis_map_x\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "negate_x", &temp_val2);
+ if (rc) {
+ dev_err(dev, "Unable to read negate_x\n");
+ return rc;
+ }
+ if (temp_val2)
+ orient[temp_val] = -1;
+ else
+ orient[temp_val] = 1;
+
+ /* parsing axis y orientation matrix */
+ rc = of_property_read_u32(np, "axis_map_y", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read axis_map_y\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "negate_y", &temp_val2);
+ if (rc) {
+ dev_err(dev, "Unable to read negate_y\n");
+ return rc;
+ }
+ if (temp_val2)
+ orient[3 + temp_val] = -1;
+ else
+ orient[3 + temp_val] = 1;
+
+ /* parsing axis z orientation matrix */
+ rc = of_property_read_u32(np, "axis_map_z", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read axis_map_z\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "negate_z", &temp_val2);
+ if (rc) {
+ dev_err(dev, "Unable to read negate_z\n");
+ return rc;
+ }
+ if (temp_val2)
+ orient[6 + temp_val] = -1;
+ else
+ orient[6 + temp_val] = 1;
+
+ return 0;
+}
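
For concreteness, a worked example of the mapping above (editorial sketch; the DT property values are invented):

/*
 * With axis_map_x = 0, axis_map_y = 1, axis_map_z = 2, negate_y = 1 and
 * the other negate_* properties 0, the code above fills the row-major
 * matrix:
 *	{  1,  0,  0,
 *	   0, -1,  0,
 *	   0,  0,  1 }
 * i.e. an identity mounting with the Y axis inverted.
 */
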
+
+static int inv_parse_secondary_orientation_matrix(struct device *dev,
+ s8 *orient)
+{
+ int rc, i;
+ struct device_node *np = dev->of_node;
+ u32 temp_val, temp_val2;
+
+ for (i = 0; i < 9; i++)
+ orient[i] = 0;
+
+ /* parsing axis x orientation matrix */
+ rc = of_property_read_u32(np, "inven,secondary_axis_map_x", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read secondary axis_map_x\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "inven,secondary_negate_x", &temp_val2);
+ if (rc) {
+ dev_err(dev, "Unable to read secondary negate_x\n");
+ return rc;
+ }
+ if (temp_val2)
+ orient[temp_val] = -1;
+ else
+ orient[temp_val] = 1;
+
+ /* parsing axis y orientation matrix */
+ rc = of_property_read_u32(np, "inven,secondary_axis_map_y", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read secondary axis_map_y\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "inven,secondary_negate_y", &temp_val2);
+ if (rc) {
+ dev_err(dev, "Unable to read secondary negate_y\n");
+ return rc;
+ }
+ if (temp_val2)
+ orient[3 + temp_val] = -1;
+ else
+ orient[3 + temp_val] = 1;
+
+ /* parsing axis z orientation matrix */
+ rc = of_property_read_u32(np, "inven,secondary_axis_map_z", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read secondary axis_map_z\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "inven,secondary_negate_z", &temp_val2);
+ if (rc) {
+ dev_err(dev, "Unable to read secondary negate_z\n");
+ return rc;
+ }
+ if (temp_val2)
+ orient[6 + temp_val] = -1;
+ else
+ orient[6 + temp_val] = 1;
+
+ return 0;
+}
+
+static int inv_parse_secondary(struct device *dev,
+ struct mpu_platform_data *pdata)
+{
+ int rc;
+ struct device_node *np = dev->of_node;
+ u32 temp_val;
+ const char *name;
+
+ if (of_property_read_string(np, "inven,secondary_type", &name)) {
+ dev_err(dev, "Missing secondary type.\n");
+ return -EINVAL;
+ }
+ if (!strcmp(name, "compass")) {
+ pdata->sec_slave_type = SECONDARY_SLAVE_TYPE_COMPASS;
+ } else if (!strcmp(name, "none")) {
+ pdata->sec_slave_type = SECONDARY_SLAVE_TYPE_NONE;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+
+ if (of_property_read_string(np, "inven,secondary_name", &name)) {
+ dev_err(dev, "Missing secondary name.\n");
+ return -EINVAL;
+ }
+ if (!strcmp(name, "ak8963"))
+ pdata->sec_slave_id = COMPASS_ID_AK8963;
+ else if (!strcmp(name, "ak8975"))
+ pdata->sec_slave_id = COMPASS_ID_AK8975;
+ else if (!strcmp(name, "ak8972"))
+ pdata->sec_slave_id = COMPASS_ID_AK8972;
+ else if (!strcmp(name, "ak09911"))
+ pdata->sec_slave_id = COMPASS_ID_AK09911;
+ else if (!strcmp(name, "ak09912"))
+ pdata->sec_slave_id = COMPASS_ID_AK09912;
+ else if (!strcmp(name, "ak09916"))
+ pdata->sec_slave_id = COMPASS_ID_AK09916;
+ else
+ return -EINVAL;
+ rc = of_property_read_u32(np, "inven,secondary_reg", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read secondary register\n");
+ return rc;
+ }
+ pdata->secondary_i2c_addr = temp_val;
+ rc = inv_parse_secondary_orientation_matrix(dev,
+ pdata->secondary_orientation);
+
+ return rc;
+}
+
+static int inv_parse_aux(struct device *dev, struct mpu_platform_data *pdata)
+{
+ int rc;
+ struct device_node *np = dev->of_node;
+ u32 temp_val;
+ const char *name;
+
+ if (of_property_read_string(np, "inven,aux_type", &name)) {
+ dev_err(dev, "Missing aux type.\n");
+ return -EINVAL;
+ }
+ if (!strcmp(name, "pressure")) {
+ pdata->aux_slave_type = SECONDARY_SLAVE_TYPE_PRESSURE;
+ } else if (!strcmp(name, "none")) {
+ pdata->aux_slave_type = SECONDARY_SLAVE_TYPE_NONE;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+
+ if (of_property_read_string(np, "inven,aux_name", &name)) {
+ dev_err(dev, "Missing aux name.\n");
+ return -EINVAL;
+ }
+ if (!strcmp(name, "bmp280"))
+ pdata->aux_slave_id = PRESSURE_ID_BMP280;
+ else
+ return -EINVAL;
+
+ rc = of_property_read_u32(np, "inven,aux_reg", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read aux register\n");
+ return rc;
+ }
+ pdata->aux_i2c_addr = temp_val;
+
+ return 0;
+}
+
+static int inv_parse_readonly_secondary(struct device *dev,
+ struct mpu_platform_data *pdata)
+{
+ int rc;
+ struct device_node *np = dev->of_node;
+ u32 temp_val;
+ const char *name;
+
+ if (of_property_read_string(np, "inven,read_only_slave_type", &name)) {
+ dev_err(dev, "Missing read only slave type type.\n");
+ return -EINVAL;
+ }
+ if (!strcmp(name, "als")) {
+ pdata->read_only_slave_type = SECONDARY_SLAVE_TYPE_ALS;
+ } else if (!strcmp(name, "none")) {
+ pdata->read_only_slave_type = SECONDARY_SLAVE_TYPE_NONE;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+
+ if (of_property_read_string(np, "inven,read_only_slave_name", &name)) {
+ dev_err(dev, "Missing read only slave type name.\n");
+ return -EINVAL;
+ }
+ if (!strcmp(name, "apds9930"))
+ pdata->read_only_slave_id = ALS_ID_APDS_9930;
+ else
+ return -EINVAL;
+
+ rc = of_property_read_u32(np, "inven,read_only_slave_reg", &temp_val);
+ if (rc) {
+ dev_err(dev, "Unable to read read only slave reg register\n");
+ return rc;
+ }
+ pdata->read_only_i2c_addr = temp_val;
+
+ return 0;
+}
+
+int invensense_mpu_parse_dt(struct device *dev, struct mpu_platform_data *pdata)
+{
+ int rc;
+
+ rc = inv_parse_orientation_matrix(dev, pdata->orientation);
+ if (rc)
+ return rc;
+ rc = inv_parse_secondary(dev, pdata);
+ if (rc)
+ return rc;
+ inv_parse_aux(dev, pdata);
+
+ inv_parse_readonly_secondary(dev, pdata);
+
+ pdata->vdd_ana = regulator_get(dev, "inven,vdd_ana");
+ if (IS_ERR(pdata->vdd_ana)) {
+ rc = PTR_ERR(pdata->vdd_ana);
+ dev_warn(dev, "regulator get failed vdd_ana-supply rc=%d\n", rc);
+ }
+ pdata->vdd_i2c = regulator_get(dev, "inven,vcc_i2c");
+ if (IS_ERR(pdata->vdd_i2c)) {
+ rc = PTR_ERR(pdata->vdd_i2c);
+ dev_warn(dev, "regulator get failed vcc-i2c-supply rc=%d\n", rc);
+ }
+ pdata->power_on = inv_mpu_power_on;
+ pdata->power_off = inv_mpu_power_off;
+ dev_dbg(dev, "parse dt complete\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(invensense_mpu_parse_dt);
+
+#endif /* CONFIG_OF */
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_dts.h b/drivers/iio/imu/inv_mpu/inv_mpu_dts.h
new file mode 100644
index 000000000000..90966febb930
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_dts.h
@@ -0,0 +1,25 @@
+/*
+* Copyright (C) 2012-2017 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _INV_MPU_DTS_H_
+#define _INV_MPU_DTS_H_
+
+#include <linux/kernel.h>
+#include <linux/iio/imu/mpu.h>
+
+#ifdef CONFIG_OF
+int invensense_mpu_parse_dt(struct device *dev,
+ struct mpu_platform_data *pdata);
+#endif
+
+#endif /* #ifndef _INV_MPU_DTS_H_ */
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu/inv_mpu_i2c.c
new file mode 100644
index 000000000000..e7838fce84a8
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_i2c.c
@@ -0,0 +1,556 @@
+/*
+* Copyright (C) 2012-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+
+#include "inv_mpu_iio.h"
+#include "inv_mpu_dts.h"
+
+#define CONFIG_DYNAMIC_DEBUG_I2C 0
+
+/**
+ * inv_i2c_read_base() - Read one or more bytes from the device registers.
+ * @st: Device driver instance.
+ * @i2c_addr: i2c address of device.
+ * @reg: First device register to be read from.
+ * @length: Number of bytes to read.
+ * @data: Data read from device.
+ * NOTE: This is not a re-implementation of i2c_smbus_read because the
+ * i2c address can be specified explicitly here. We may have two
+ * different i2c addresses due to the secondary i2c interface.
+ */
+int inv_i2c_read_base(struct inv_mpu_state *st, u16 i2c_addr,
+ u8 reg, u16 length, u8 *data)
+{
+ struct i2c_msg msgs[2];
+ int res;
+
+ if (!data)
+ return -EINVAL;
+
+ msgs[0].addr = i2c_addr;
+ msgs[0].flags = 0; /* write */
+ msgs[0].buf = &reg;
+ msgs[0].len = 1;
+
+ msgs[1].addr = i2c_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = data;
+ msgs[1].len = length;
+
+ res = i2c_transfer(st->sl_handle, msgs, 2);
+
+ if (res < 2) {
+ if (res >= 0)
+ res = -EIO;
+ } else
+ res = 0;
+ INV_I2C_INC_MPUWRITE(3);
+ INV_I2C_INC_MPUREAD(length);
+
+ return res;
+}
+
+/**
+ * inv_i2c_single_write_base() - Write a byte to a device register.
+ * @st: Device driver instance.
+ * @i2c_addr: I2C address of the device.
+ * @reg: Device register to be written to.
+ * @data: Byte to write to device.
+ * NOTE: This is not a re-implementation of i2c_smbus_write because the
+ * i2c address can be specified explicitly here. We may have two
+ * different i2c addresses due to the secondary i2c interface.
+ */
+int inv_i2c_single_write_base(struct inv_mpu_state *st,
+ u16 i2c_addr, u8 reg, u8 data)
+{
+ u8 tmp[2];
+ struct i2c_msg msg;
+ int res;
+
+ tmp[0] = reg;
+ tmp[1] = data;
+
+ msg.addr = i2c_addr;
+ msg.flags = 0; /* write */
+ msg.buf = tmp;
+ msg.len = 2;
+
+ INV_I2C_INC_MPUWRITE(3);
+
+ res = i2c_transfer(st->sl_handle, &msg, 1);
+ if (res < 1) {
+ if (res == 0)
+ res = -EIO;
+ return res;
+ } else
+ return 0;
+}
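
A usage sketch for the _base helpers above (editorial, not part of the patch): the same routines serve the MPU's own address and any device behind its secondary i2c interface. The function name is invented; sec_addr would typically come from st->plat_data.secondary_i2c_addr.

static int example_read_secondary_byte(struct inv_mpu_state *st,
				       u16 sec_addr, u8 reg, u8 *val)
{
	/* one-byte read from a slave at an explicit address on the same bus */
	return inv_i2c_read_base(st, sec_addr, reg, 1, val);
}
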
+
+static int inv_i2c_single_write(struct inv_mpu_state *st, u8 reg, u8 data)
+{
+ return inv_i2c_single_write_base(st, st->i2c_addr, reg, data);
+}
+
+static int inv_i2c_read(struct inv_mpu_state *st, u8 reg, int len, u8 *data)
+{
+ return inv_i2c_read_base(st, st->i2c_addr, reg, len, data);
+}
+
+static int _memory_write(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+ u32 len, u8 const *data)
+{
+ u8 bank[2];
+ u8 addr[2];
+ u8 buf[513];
+
+ struct i2c_msg msgs[3];
+ int res;
+
+ if (!data || !st)
+ return -EINVAL;
+
+ if (len >= (sizeof(buf) - 1))
+ return -ENOMEM;
+
+ bank[0] = REG_MEM_BANK_SEL;
+ bank[1] = mem_addr >> 8;
+
+ addr[0] = REG_MEM_START_ADDR;
+ addr[1] = mem_addr & 0xFF;
+
+ buf[0] = REG_MEM_R_W;
+ memcpy(buf + 1, data, len);
+
+ /* write message */
+ msgs[0].addr = mpu_addr;
+ msgs[0].flags = 0;
+ msgs[0].buf = bank;
+ msgs[0].len = sizeof(bank);
+
+ msgs[1].addr = mpu_addr;
+ msgs[1].flags = 0;
+ msgs[1].buf = addr;
+ msgs[1].len = sizeof(addr);
+
+ msgs[2].addr = mpu_addr;
+ msgs[2].flags = 0;
+ msgs[2].buf = (u8 *) buf;
+ msgs[2].len = len + 1;
+
+ INV_I2C_INC_MPUWRITE(3 + 3 + (2 + len));
+
+#if CONFIG_DYNAMIC_DEBUG_I2C
+ {
+ char *write = NULL;
+ pr_debug("%s WM%02X%02X%02X%s%s - %d\n", st->hw->name,
+ mpu_addr, bank[1], addr[1],
+ wr_pr_debug_begin(data, len, write),
+ wr_pr_debug_end(write), len);
+ }
+#endif
+
+ res = i2c_transfer(st->sl_handle, msgs, 3);
+ if (res != 3) {
+ if (res >= 0)
+ res = -EIO;
+ return res;
+ } else {
+ return 0;
+ }
+}
+
+static int inv_i2c_mem_write(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+ u32 len, u8 const *data)
+{
+ int r, i, j;
+#define DMP_MEM_CMP_SIZE 16
+ u8 w[DMP_MEM_CMP_SIZE];
+ bool retry;
+
+ j = 0;
+ retry = true;
+ while ((j < 3) && retry) {
+ retry = false;
+ r = _memory_write(st, mpu_addr, mem_addr, len, data);
+ if (len < DMP_MEM_CMP_SIZE) {
+ r = mem_r(mem_addr, len, w);
+ for (i = 0; i < len; i++) {
+ if (data[i] != w[i]) {
+ pr_debug
+ ("error write=%x, len=%d,data=%x, w=%x, i=%d\n",
+ mem_addr, len, data[i], w[i], i);
+ retry = true;
+ }
+ }
+ }
+ j++;
+ }
+
+ return r;
+}
+
+static int inv_i2c_mem_read(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+ u32 len, u8 *data)
+{
+ u8 bank[2];
+ u8 addr[2];
+ u8 buf;
+
+ struct i2c_msg msgs[4];
+ int res;
+
+ if (!data || !st)
+ return -EINVAL;
+
+ bank[0] = REG_MEM_BANK_SEL;
+ bank[1] = mem_addr >> 8;
+
+ addr[0] = REG_MEM_START_ADDR;
+ addr[1] = mem_addr & 0xFF;
+
+ buf = REG_MEM_R_W;
+
+ /* write message */
+ msgs[0].addr = mpu_addr;
+ msgs[0].flags = 0;
+ msgs[0].buf = bank;
+ msgs[0].len = sizeof(bank);
+
+ msgs[1].addr = mpu_addr;
+ msgs[1].flags = 0;
+ msgs[1].buf = addr;
+ msgs[1].len = sizeof(addr);
+
+ msgs[2].addr = mpu_addr;
+ msgs[2].flags = 0;
+ msgs[2].buf = &buf;
+ msgs[2].len = 1;
+
+ msgs[3].addr = mpu_addr;
+ msgs[3].flags = I2C_M_RD;
+ msgs[3].buf = data;
+ msgs[3].len = len;
+
+ res = i2c_transfer(st->sl_handle, msgs, 4);
+ if (res != 4) {
+ if (res >= 0)
+ res = -EIO;
+ } else
+ res = 0;
+ INV_I2C_INC_MPUWRITE(3 + 3 + 3);
+ INV_I2C_INC_MPUREAD(len);
+
+#if CONFIG_DYNAMIC_DEBUG_I2C
+ {
+ char *read = NULL;
+ pr_debug("%s RM%02X%02X%02X%02X - %s%s\n", st->hw->name,
+ mpu_addr, bank[1], addr[1], len,
+ wr_pr_debug_begin(data, len, read),
+ wr_pr_debug_end(read));
+ }
+#endif
+
+ return res;
+}
+
+/*
+ * inv_mpu_probe() - probe function.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct inv_mpu_state *st;
+ struct iio_dev *indio_dev;
+ int result;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ result = -ENOSYS;
+ pr_err("I2c function error\n");
+ goto out_no_free;
+ }
+
+#ifdef KERNEL_VERSION_4_X
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (indio_dev == NULL) {
+ pr_err("memory allocation failed\n");
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+#else
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL) {
+ pr_err("memory allocation failed\n");
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+#endif
+ st = iio_priv(indio_dev);
+ st->client = client;
+ st->sl_handle = client->adapter;
+ st->i2c_addr = client->addr;
+ st->write = inv_i2c_single_write;
+ st->read = inv_i2c_read;
+ st->mem_write = inv_i2c_mem_write;
+ st->mem_read = inv_i2c_mem_read;
+ st->dev = &client->dev;
+ st->bus_type = BUS_I2C;
+#ifdef CONFIG_OF
+ result = invensense_mpu_parse_dt(st->dev, &st->plat_data);
+ if (result)
+# ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+# else
+ goto out_free;
+# endif
+
+ /* Power on device */
+ if (st->plat_data.power_on) {
+ result = st->plat_data.power_on(&st->plat_data);
+ if (result < 0) {
+ dev_err(st->dev, "power_on failed: %d\n", result);
+# ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+# else
+ goto out_free;
+# endif
+ }
+ pr_info("%s: power on here.\n", __func__);
+ }
+ pr_info("%s: power on.\n", __func__);
+
+ msleep(100);
+#else
+ if (dev_get_platdata(st->dev) == NULL)
+# ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+# else
+ goto out_free;
+# endif
+ st->plat_data = *(struct mpu_platform_data *)dev_get_platdata(st->dev);
+#endif
+
+ /* power is turned on inside check chip type */
+ result = inv_check_chip_type(indio_dev, id->name);
+ if (result)
+#ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+#else
+ goto out_free;
+#endif
+
+ /* Make state variables available to all _show and _store functions. */
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->dev.parent = st->dev;
+ indio_dev->name = id->name;
+
+ st->irq = client->irq;
+
+ result = inv_mpu_configure_ring(indio_dev);
+ if (result) {
+ pr_err("configure ring buffer fail\n");
+ goto out_free;
+ }
+#ifdef KERNEL_VERSION_4_X
+ INV_I2C_SETIRQ(IRQ_MPU, st->irq);
+ result = devm_iio_device_register(st->dev, indio_dev);
+ if (result) {
+ pr_err("IIO device register fail\n");
+ goto out_unreg_ring;
+ }
+#else
+ result = iio_buffer_register(indio_dev, indio_dev->channels,
+ indio_dev->num_channels);
+ if (result) {
+ pr_err("ring buffer register fail\n");
+ goto out_unreg_ring;
+ }
+ INV_I2C_SETIRQ(IRQ_MPU, client->irq);
+ result = iio_device_register(indio_dev);
+ if (result) {
+ pr_err("IIO device register fail\n");
+ goto out_remove_ring;
+ }
+#endif
+
+ result = inv_create_dmp_sysfs(indio_dev);
+ if (result) {
+ pr_err("create dmp sysfs failed\n");
+ goto out_unreg_iio;
+ }
+ init_waitqueue_head(&st->wait_queue);
+ st->resume_state = true;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&st->wake_lock, WAKE_LOCK_SUSPEND, "inv_mpu");
+#else
+ wakeup_source_init(&st->wake_lock, "inv_mpu");
+#endif
+ dev_info(st->dev, "%s ma-kernel-%s is ready to go!\n",
+ indio_dev->name, INVENSENSE_DRIVER_VERSION);
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+ pr_info("Data read from registers\n");
+#else
+ pr_info("Data read from FIFO\n");
+#endif
+#ifdef TIMER_BASED_BATCHING
+ pr_info("Timer based batching\n");
+#endif
+
+ return 0;
+#ifdef KERNEL_VERSION_4_X
+out_unreg_iio:
+ devm_iio_device_unregister(st->dev, indio_dev);
+out_unreg_ring:
+ inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+ devm_iio_device_free(st->dev, indio_dev);
+out_no_free:
+#else
+out_unreg_iio:
+ iio_device_unregister(indio_dev);
+out_remove_ring:
+ iio_buffer_unregister(indio_dev);
+out_unreg_ring:
+ inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+ iio_device_free(indio_dev);
+out_no_free:
+#endif
+ dev_err(st->dev, "%s failed %d\n", __func__, result);
+
+ return -EIO;
+}
+
+static void inv_mpu_shutdown(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ int result;
+
+ mutex_lock(&indio_dev->mlock);
+ inv_switch_power_in_lp(st, true);
+ dev_dbg(st->dev, "Shutting down %s...\n", st->hw->name);
+
+ /* reset to make sure previous state are not there */
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_H_RESET);
+ if (result)
+ dev_err(st->dev, "Failed to reset %s\n",
+ st->hw->name);
+ msleep(POWER_UP_TIME);
+ /* turn off power to ensure gyro engine is off */
+ result = inv_set_power(st, false);
+ if (result)
+ dev_err(st->dev, "Failed to turn off %s\n",
+ st->hw->name);
+ inv_switch_power_in_lp(st, false);
+ mutex_unlock(&indio_dev->mlock);
+}
+
+/*
+ * inv_mpu_remove() - remove function.
+ */
+static int inv_mpu_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+#ifdef KERNEL_VERSION_4_X
+ devm_iio_device_unregister(st->dev, indio_dev);
+#else
+ iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
+#endif
+ inv_mpu_unconfigure_ring(indio_dev);
+#ifdef KERNEL_VERSION_4_X
+ devm_iio_device_free(st->dev, indio_dev);
+#else
+ iio_device_free(indio_dev);
+#endif
+ dev_info(st->dev, "inv-mpu-iio module removed.\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int inv_mpu_i2c_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+ return inv_mpu_suspend(indio_dev);
+}
+
+static void inv_mpu_i2c_complete(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+ inv_mpu_complete(indio_dev);
+}
+#endif
+
+static const struct dev_pm_ops inv_mpu_i2c_pmops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = inv_mpu_i2c_suspend,
+ .complete = inv_mpu_i2c_complete,
+#endif
+};
+
+/* device id table is used to identify what device can be
+ * supported by this driver
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+ {"icm20645", ICM20645},
+ {"icm10340", ICM10340},
+ {"icm20648", ICM20648},
+#else
+ {"icm20608d", ICM20608D},
+ {"icm20690", ICM20690},
+ {"icm20602", ICM20602},
+ {"iam20680", IAM20680},
+#endif
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static struct i2c_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .shutdown = inv_mpu_shutdown,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "inv-mpu-iio-i2c",
+ .pm = &inv_mpu_i2c_pmops,
+ },
+};
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense I2C device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu/inv_mpu_iio.h
new file mode 100644
index 000000000000..9e7316558eae
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_iio.h
@@ -0,0 +1,1138 @@
+/*
+ * Copyright (C) 2012-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _INV_MPU_IIO_H_
+#define _INV_MPU_IIO_H_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0))
+#define KERNEL_VERSION_4_X
+#endif
+
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/iio/imu/mpu.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#else
+#include <linux/pm_wakeup.h>
+#endif
+#include <linux/wait.h>
+
+#include <linux/iio/sysfs.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+#include "icm20648/dmp3Default.h"
+#endif
+#ifdef CONFIG_INV_MPU_IIO_ICM20608D
+#include "icm20608d/dmp3Default_20608D.h"
+#endif
+
+#include "inv_test/inv_counters.h"
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+#include "icm20648/inv_mpu_iio_reg_20648.h"
+#elif defined(CONFIG_INV_MPU_IIO_ICM20602)
+#include "icm20602/inv_mpu_iio_reg_20602.h"
+#elif defined(CONFIG_INV_MPU_IIO_ICM20608D)
+#include "icm20608d/inv_mpu_iio_reg_20608.h"
+#elif defined(CONFIG_INV_MPU_IIO_ICM20690)
+#include "icm20690/inv_mpu_iio_reg_20690.h"
+#elif defined(CONFIG_INV_MPU_IIO_IAM20680)
+#include "iam20680/inv_mpu_iio_reg_20680.h"
+#endif
+
+#define INVENSENSE_DRIVER_VERSION "8.1.2-simple-test1"
+
+/* #define DEBUG */
+
+/* #define ACCEL_BIAS_TEST */
+
+/* #define BIAS_CONFIDENCE_HIGH 1 */
+
+#define MAX_FIFO_READ_SIZE 128
+#define MAX_DMP_READ_SIZE 16
+
+/* data header defines */
+#define WAKE_HDR 0x8000
+
+#define ACCEL_HDR 1
+#define GYRO_HDR 2
+#define COMPASS_HDR 3
+#define ALS_HDR 4
+#define SIXQUAT_HDR 5
+#define NINEQUAT_HDR 6
+#define PEDQUAT_HDR 7
+#define GEOMAG_HDR 8
+#define PRESSURE_HDR 9
+#define GYRO_CALIB_HDR 10
+#define COMPASS_CALIB_HDR 11
+#define STEP_COUNTER_HDR 12
+#define STEP_DETECTOR_HDR 13
+#define STEP_COUNT_HDR 14
+#define ACTIVITY_HDR 15
+#define PICK_UP_HDR 16
+#define EMPTY_MARKER 17
+#define END_MARKER 18
+#define COMPASS_ACCURACY_HDR 19
+#define ACCEL_ACCURACY_HDR 20
+#define GYRO_ACCURACY_HDR 21
+#define EIS_GYRO_HDR 36
+#define EIS_CALIB_HDR 37
+#define LPQ_HDR 38
+
+#define ACCEL_WAKE_HDR (ACCEL_HDR | WAKE_HDR)
+#define GYRO_WAKE_HDR (GYRO_HDR | WAKE_HDR)
+#define COMPASS_WAKE_HDR (COMPASS_HDR | WAKE_HDR)
+#define ALS_WAKE_HDR (ALS_HDR | WAKE_HDR)
+#define SIXQUAT_WAKE_HDR (SIXQUAT_HDR | WAKE_HDR)
+#define NINEQUAT_WAKE_HDR (NINEQUAT_HDR | WAKE_HDR)
+#define PEDQUAT_WAKE_HDR (PEDQUAT_HDR | WAKE_HDR)
+#define GEOMAG_WAKE_HDR (GEOMAG_HDR | WAKE_HDR)
+#define PRESSURE_WAKE_HDR (PRESSURE_HDR | WAKE_HDR)
+#define GYRO_CALIB_WAKE_HDR (GYRO_CALIB_HDR | WAKE_HDR)
+#define COMPASS_CALIB_WAKE_HDR (COMPASS_CALIB_HDR | WAKE_HDR)
+#define STEP_COUNTER_WAKE_HDR (STEP_COUNTER_HDR | WAKE_HDR)
+#define STEP_DETECTOR_WAKE_HDR (STEP_DETECTOR_HDR | WAKE_HDR)
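
Since a wake-up variant is simply the base header with bit 15 (WAKE_HDR) set, a consumer can decode a header as below (editorial sketch, not part of the patch):

static inline bool example_hdr_is_wake(u16 hdr)
{
	return (hdr & WAKE_HDR) != 0;	/* bit 15 marks the wake variant */
}

static inline u16 example_hdr_base(u16 hdr)
{
	return hdr & ~WAKE_HDR;		/* strip the wake bit for the base header */
}
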
+
+/* init parameters */
+#define MPU_INIT_SMD_THLD 1500
+#define MPU_INIT_GYRO_SCALE 3
+#define MPU_INIT_ACCEL_SCALE 2
+#define MPU_INIT_PED_INT_THRESH 2
+#define MPU_INIT_PED_STEP_THRESH 6
+#define MPU_4X_TS_GYRO_SHIFT (3160000 / 2)
+#define DMP_START_ADDR_20645 0x900
+#define DMP_START_ADDR_20648 0x1000
+#define DMP_START_ADDR_10340 0x0a60
+#define DMP_START_ADDR_20608D 0x4B0
+#define MAX_WR_SZ 100
+#define WOM_DELAY_THRESHOLD 200
+#define INV_ODR_BUFFER_MULTI 20
+#define INV_ODR_OVER_FACTOR 20
+
+#define COVARIANCE_SIZE 14
+#define ACCEL_COVARIANCE_SIZE (COVARIANCE_SIZE * sizeof(int))
+
+enum inv_bus_type {
+ BUS_I2C = 0,
+ BUS_SPI,
+};
+
+struct inv_mpu_state;
+
+enum INV_ENGINE {
+ ENGINE_GYRO = 0,
+ ENGINE_ACCEL,
+ ENGINE_PRESSURE,
+ ENGINE_I2C,
+ ENGINE_NUM_MAX,
+};
+
+/**
+ * struct inv_hw_s - Other important hardware information.
+ * @num_reg: Number of registers on device.
+ * @name: name of the chip
+ */
+struct inv_hw_s {
+ u8 num_reg;
+ u8 *name;
+};
+
+/**
+ * struct inv_sensor - information for each sensor.
+ * @ts: this sensors timestamp.
+ * @ts_adj: sensor timestamp adjustment.
+ * @previous_ts: previous timestamp for this sensor.
+ * @dur: duration between samples in ns.
+ * @rate: sensor data rate.
+ * @sample_size: number of bytes for the sensor.
+ * @odr_addr: output data rate address in DMP.
+ * @counter_addr: output counter address in DMP.
+ * @output: output on/off control word.
+ * @time_calib: calibrate timestamp.
+ * @sample_calib: calibrate bytes accumulated.
+ * @div: divider in DMP mode.
+ * @calib_flag: calibrate flag used to improve the accuracy of estimation.
+ * @on: sensor on/off.
+ * @a_en: accel engine requirement.
+ * @g_en: gyro engine requirement.
+ * @c_en: compass_engine requirement.
+ * @p_en: pressure engine requirement.
+ * @engine_base: engine base for this sensor.
+ * @count: number of samples in one session.
+ * @send: decide whether to send this sample or not.
+ */
+struct inv_sensor {
+ u64 ts;
+ s64 ts_adj;
+ u64 previous_ts;
+ int dur;
+ int rate;
+ u8 sample_size;
+ int odr_addr;
+ int counter_addr;
+ u16 output;
+ u64 time_calib;
+ u32 sample_calib;
+ int div;
+ bool calib_flag;
+ bool on;
+ bool a_en;
+ bool g_en;
+ bool c_en;
+ bool p_en;
+ enum INV_ENGINE engine_base;
+ int count;
+ bool send;
+};
+
+/**
+ * struct inv_sensor_accuracy - accuracy information for each sensor.
+ * @output: output on/off control word.
+ * @sample_size: number of bytes for the sensor.
+ * @on: sensor on/off.
+ * @header: accuracy header for communicating with the HAL.
+ */
+struct inv_sensor_accuracy {
+ u16 output;
+ u8 sample_size;
+ bool on;
+ u16 header;
+};
+
+enum SENSOR_ACCURACY {
+ SENSOR_ACCEL_ACCURACY = 0,
+ SENSOR_GYRO_ACCURACY,
+ SENSOR_COMPASS_ACCURACY,
+ SENSOR_ACCURACY_NUM_MAX,
+};
+
+enum SENSOR_L {
+ SENSOR_L_ACCEL = 0,
+ SENSOR_L_GYRO,
+ SENSOR_L_MAG,
+ SENSOR_L_ALS,
+ SENSOR_L_SIXQ,
+ SENSOR_L_THREEQ,
+ SENSOR_L_NINEQ,
+ SENSOR_L_PEDQ,
+ SENSOR_L_GEOMAG,
+ SENSOR_L_PRESSURE,
+ SENSOR_L_GYRO_CAL,
+ SENSOR_L_MAG_CAL,
+ SENSOR_L_EIS_GYRO,
+ /*wake sensors */
+ SENSOR_L_ACCEL_WAKE = 13,
+ SENSOR_L_GYRO_WAKE,
+ SENSOR_L_MAG_WAKE,
+ SENSOR_L_ALS_WAKE,
+ SENSOR_L_SIXQ_WAKE,
+ SENSOR_L_NINEQ_WAKE,
+ SENSOR_L_PEDQ_WAKE,
+ SENSOR_L_GEOMAG_WAKE,
+ SENSOR_L_PRESSURE_WAKE,
+ SENSOR_L_GYRO_CAL_WAKE,
+ SENSOR_L_MAG_CAL_WAKE,
+ SENSOR_L_GESTURE_ACCEL,
+ SENSOR_L_NUM_MAX,
+};
+
+/**
+ * struct android_l_sensor - information for each android sensor.
+ * @ts: this sensors timestamp.
+ * @base: android sensor based on invensense sensor.
+ * @rate: output rate.
+ * @on: sensor on/off.
+ * @wake_on: wake on sensor is on/off.
+ * @div: divider for the output.
+ * @counter: counter works with the divider.
+ * @header: header for the output.
+ */
+struct android_l_sensor {
+ u64 ts;
+ enum INV_SENSORS base;
+ int rate;
+ bool on;
+ bool wake_on;
+ int div;
+ int counter;
+ u16 header;
+};
+
+/**
+ * struct inv_batch - information for batchmode.
+ * @on: normal batch mode on.
+ * @default_on: default batch on. This is optimization option.
+ * @overflow_on: overflow mode for batchmode.
+ * @wake_fifo_on: overflow for suspend mode.
+ * @step_only: means only step detector data is batched.
+ * @post_isr_run: means the post ISR has run once.
+ * @counter: counter for batch mode.
+ * @timeout: nominal timeout value for batchmode in milliseconds.
+ * @max_rate: max rate for all batched sensors.
+ * @pk_size: packet size.
+ * @fifo_wm_th: FIFO watermark threshold.
+ * @engine_base: engine base batch mode should stick to.
+ */
+struct inv_batch {
+ bool on;
+ bool default_on;
+ bool overflow_on;
+ bool wake_fifo_on;
+ bool step_only;
+ bool post_isr_run;
+ u32 counter;
+ u32 timeout;
+ u32 max_rate;
+ u32 pk_size;
+ u32 fifo_wm_th;
+ enum INV_ENGINE engine_base;
+};
+
+/**
+ * struct inv_chip_config_s - Cached chip configuration data.
+ * @fsr: Full scale range.
+ * @lpf: Digital low pass filter frequency.
+ * @accel_fs: accel full scale range.
+ * @accel_enable: enable accel functionality
+ * @gyro_enable: enable gyro functionality
+ * @compass_enable: enable compass functionality.
+ * @geomag_enable: enable geomag sensor functions.
+ * @als_enable: enable ALS functionality.
+ * @prox_enable: enable proximity functionality.
+ * @pressure_enable: enable pressure functionality.
+ * @secondary_enable: secondary I2C bus enabled or not.
+ * @has_gyro: has gyro or not.
+ * @has_compass: has secondary I2C compass or not.
+ * @has_pressure: has secondary I2C pressure or not.
+ * @has_als: has secondary I2C als or not.
+ * @slave_enable: secondary I2C interface enabled or not.
+ * @normal_compass_measure: discard first compass data after reset.
+ * @is_asleep: 1 if chip is powered down.
+ * @lp_en_set: 1 if LP_EN bit is set.
+ * @lp_en_mode_off: debug mode that turns LP_EN mode off.
+ * @clk_sel: debug_mode that turns on/off clock selection.
+ * @dmp_on: dmp is on/off.
+ * @dmp_event_int_on: dmp event interrupt on/off.
+ * @wom_on: WOM interrupt on. This is an internal variable.
+ * @step_indicator_on: step indicate bit added to the sensor or not.
+ * @tilt_enable: tilt enable.
+ * @pick_up_enable: pick up gesture enable.
+ * @eis_enable: EIS enable.
+ * @step_detector_on: step detector on or not.
+ * @activity_on: turn on/off activity.
+ * @activity_eng_on: activity engine on/off.
+ * @firmware_loaded: flag indicate firmware loaded or not.
+ * @low_power_gyro_on: flag indicating low power gyro on/off.
+ * @wake_on: any wake on sensor is on/off.
+ * @compass_rate: compass engine rate. Determined by underlying data.
+ */
+struct inv_chip_config_s {
+ u32 fsr:2;
+ u32 lpf:3;
+ u32 accel_fs:2;
+ u32 accel_enable:1;
+ u32 gyro_enable:1;
+ u32 compass_enable:1;
+ u32 geomag_enable:1;
+ u32 als_enable:1;
+ u32 prox_enable:1;
+ u32 pressure_enable:1;
+ u32 has_gyro:1;
+ u32 has_compass:1;
+ u32 has_pressure:1;
+ u32 has_als:1;
+ u32 slave_enable:1;
+ u32 normal_compass_measure:1;
+ u32 is_asleep:1;
+ u32 lp_en_set:1;
+ u32 lp_en_mode_off:1;
+ u32 clk_sel:1;
+ u32 dmp_on:1;
+ u32 dmp_event_int_on:1;
+ u32 wom_on:1;
+ u32 step_indicator_on:1;
+ u32 tilt_enable:1;
+ u32 pick_up_enable:1;
+ u32 eis_enable:1;
+ u32 step_detector_on:1;
+ u32 activity_on:1;
+ u32 activity_eng_on:1;
+ u32 firmware_loaded:1;
+ u32 low_power_gyro_on:1;
+ u32 wake_on:1;
+ int compass_rate;
+};
+
+/**
+ * struct inv_temp_comp - temperature compensation structure.
+ * @t_lo: raw temperature in low temperature.
+ * @t_hi: raw temperature in high temperature.
+ * @b_lo: gyro bias in low temperature.
+ * @b_hi: gyro bias in high temperature.
+ * @has_low: flag indicating low temperature parameters are updated.
+ * @has_high: flag indicating high temperature parameters are updated.
+ * @slope: slope for temperature compensation.
+ */
+struct inv_temp_comp {
+ int t_lo;
+ int t_hi;
+ int b_lo[3];
+ int b_hi[3];
+ bool has_low;
+ bool has_high;
+ int slope[3];
+};
+
+/**
+ * struct inv_chip_info_s - Chip related information.
+ * @product_id: Product id.
+ * @product_revision: Product revision.
+ * @silicon_revision: Silicon revision.
+ * @software_revision: software revision.
+ * @compass_sens: compass sensitivity.
+ * @gyro_sens_trim: Gyro sensitivity trim factor.
+ * @accel_sens_trim: accel sensitivity trim factor.
+ */
+struct inv_chip_info_s {
+ u8 product_id;
+ u8 product_revision;
+ u8 silicon_revision;
+ u8 software_revision;
+ u8 compass_sens[3];
+ u32 gyro_sens_trim;
+ u32 accel_sens_trim;
+};
+
+/**
+ * struct inv_smd - significant motion detection structure.
+ * @threshold: accel threshold for motion detection.
+ * @delay: delay time to confirm 2nd motion.
+ * @delay2: delay window parameter.
+ * @on: smd on/off.
+ */
+struct inv_smd {
+ u32 threshold;
+ u32 delay;
+ u32 delay2;
+ bool on;
+};
+
+/**
+ * struct inv_ped - pedometer related data structure.
+ * @step: steps taken.
+ * @time: time taken during the period.
+ * @last_step_time: last time the step is taken.
+ * @step_thresh: step threshold to show steps.
+ * @int_thresh: step threshold to generate interrupt.
+ * @int_on: pedometer interrupt enable/disable.
+ * @on: pedometer on/off.
+ * @engine_on: pedometer engine on/off.
+ */
+struct inv_ped {
+ u64 step;
+ u64 time;
+ u64 last_step_time;
+ u16 step_thresh;
+ u16 int_thresh;
+ bool int_on;
+ bool on;
+ bool engine_on;
+};
+
+/**
+ * struct inv_eis - EIS related data structure.
+ * @prev_gyro: latest gyro data just before FSYNC triggered
+ * @prev_timestamp: latest gyro timestamp just before FSYNC triggered
+ * @current_gyro: gyro data just after FSYNC triggered
+ * @current_timestamp: gyro timestamp just after FSYNC triggered
+ * @fsync_timestamp: timestamp of FSYNC event
+ * @fsync_delay: delay time of FSYNC and Gyro data. DMP data of FSYNC event
+ * @eis_triggered: check fsync event is triggered or not.
+ * @eis_frame: current frame is eis frame;
+ * @current_sync: current frame contains fsync counter.
+ * @frame_count: frame count for synchronization.
+ */
+struct inv_eis {
+ int prev_gyro[3];
+ u64 prev_timestamp;
+ int current_gyro[3];
+ u64 current_timestamp;
+ u32 frame_dur;
+ u64 slope[3];
+ u64 fsync_timestamp;
+ u64 last_fsync_timestamp;
+ u16 fsync_delay;
+ bool eis_triggered;
+ bool eis_frame;
+ bool current_sync;
+ bool prev_state;
+ u32 frame_count;
+ int gyro_counter;
+ int gyro_counter_s[3];
+ int fsync_delay_s[3];
+ int voting_count;
+ int voting_count_sub;
+ int voting_state;
+ int count_precision;
+};
+
+enum TRIGGER_STATE {
+ DATA_TRIGGER = 0,
+ RATE_TRIGGER,
+ EVENT_TRIGGER,
+ MISC_TRIGGER,
+ DEBUG_TRIGGER,
+};
+
+enum inv_fifo_count_mode {
+ BYTE_MODE,
+ RECORD_MODE
+};
+
+/**
+ * struct inv_secondary_reg - secondary registers data structure.
+ * @addr: address of the slave.
+ * @reg: register address of slave.
+ * @ctrl: control register.
+ * @d0: data out register.
+ */
+struct inv_secondary_reg {
+ u8 addr;
+ u8 reg;
+ u8 ctrl;
+ u8 d0;
+};
+
+struct inv_secondary_set {
+ u8 delay_enable;
+ u8 delay_time;
+ u8 odr_config;
+};
+/**
+ * struct inv_engine_info - data structure for engines.
+ * @base_time: base time for each engine.
+ * @base_time_1k: base time when chip is running at 1K;
+ * @divider: divider used to downsample engine rate from original rate.
+ * @running_rate: the actually running rate of engine.
+ * @orig_rate: original rate for each engine before downsample.
+ * @dur: duration for one tick.
+ * @last_update_time: last update time.
+ */
+struct inv_engine_info {
+ u32 base_time;
+ u32 base_time_1k;
+ u32 divider;
+ u32 running_rate;
+ u32 orig_rate;
+ u32 dur;
+ u64 last_update_time;
+};
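
The field descriptions above imply the following relation between the original and running rates (an assumption drawn from the kernel-doc, not confirmed elsewhere in this header):

/*
 * Assumed relation: running_rate = orig_rate / divider. For example, an
 * engine with orig_rate = 1000 Hz and divider = 5 would run at 200 Hz,
 * and dur (nanoseconds per tick) would grow by the same factor of 5.
 */
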
+
+struct inv_ois {
+ int gyro_fs;
+ int accel_fs;
+ bool en;
+};
+
+/**
+ * struct inv_timestamp_algo - timestamp algorithm.
+ * @last_run_time: last time the post ISR runs.
+ * @ts_for_calib: ts storage for calibration.
+ * @reset_ts: reset time.
+ * @dmp_ticks: dmp ticks storage for calibration.
+ * @start_dmp_counter: dmp counter when start a new session.
+ * @calib_counter: calibration counter for timestamp.
+ * @resume_flag: flag to indicate this is the first time after resume; the
+ *	time could differ by up to 1 second.
+ * @clock_base: clock base to calculate the timestamp.
+ * @gyro_ts_shift: 9 K counter for EIS.
+ * @first_sample: first sample of 1K running should be dropped since it
+ *	affects timing.
+ */
+struct inv_timestamp_algo {
+ u64 last_run_time;
+ u64 ts_for_calib;
+ u64 reset_ts;
+ u32 dmp_ticks;
+ u32 start_dmp_counter;
+ int calib_counter;
+ bool resume_flag;
+ enum INV_ENGINE clock_base;
+ u32 gyro_ts_shift;
+ u32 first_sample;
+};
+
+struct inv_mpu_slave;
+/**
+ * struct inv_mpu_state - Driver state variables.
+ * @dev: device address of the current bus, i2c or spi.
+ * @chip_config: Cached attribute information.
+ * @chip_info: Chip information from read-only registers.
+ * @smd: SMD data structure.
+ * @ped: pedometer data structure.
+ * @batch: batchmode data structure.
+ * @temp_comp: gyro temperature compensation structure.
+ * @slave_compass: slave compass.
+ * @slave_pressure: slave pressure.
+ * @slave_als: slave als.
+ * @slv_reg: slave register data structure.
+ * @ts_algo: timestamp algorithm data structure.
+ * @sec_set: slave register odr config.
+ * @eng_info: information for each engine.
+ * @hw: Other hardware-specific information.
+ * @chip_type: chip type.
+ * @suspend_resume_sema: semaphore for suspend/resume.
+ * @wake_lock: wake lock of the system.
+ * @client: i2c client handle.
+ * @plat_data: platform data.
+ * @sl_handle: Handle to I2C port.
+ * @sensor[SENSOR_NUM_MAX]: sensor individual properties.
+ * @sensor_l[SENSOR_L_NUM_MAX]: android L sensors properties.
+ * @sensor_accuracy[SENSOR_ACCURACY_NUM_MAX]: sensor accuracy.
+ * @sensor_acurracy_flag: flag indicating whether to check output accuracy.
+ * @irq: irq number store.
+ * @accel_bias: accel bias store.
+ * @gyro_bias: gyro bias store.
+ * @accel_st_bias: accel bias store, result of self-test.
+ * @gyro_st_bias: gyro bias store, result of self-test.
+ * @gyro_ois_st_bias: gyro bias store from ois self test result.
+ * @input_accel_dmp_bias[3]: accel bias for dmp.
+ * @input_gyro_dmp_bias[3]: gyro bias for dmp.
+ * @input_compass_dmp_bias[3]: compass bias for dmp.
+ * @input_accel_bias[3]: accel bias for offset register.
+ * @input_gyro_bias[3]: gyro bias for offset register.
+ * @fifo_data[8]: fifo data storage.
+ * @i2c_addr: i2c address.
+ * @header_count: header count in current FIFO.
+ * @step_det_count: number of step detectors in one batch.
+ * @gyro_sf: gyro scale factor.
+ * @left_over[LEFT_OVER_BYTES]: left over bytes storage.
+ * @left_over_size: left over size.
+ * @fifo_count: current fifo_count;
+ * @wake_sensor_received: wake up sensor received.
+ * @accel_cal_enable: accel calibration on/off
+ * @gyro_cal_enable: gyro calibration on/off
+ * @calib_compass_on: calibrate compass on.
+ * @debug_determine_engine_on: determine engine on/off.
+ * @poke_mode_on: poke mode on/off.
+ * @mode_1k_on: indicate 1K Hz mode is on.
+ * @poke_ts: time stamp for poke feature.
+ * @step_detector_base_ts: base time stamp for step detector calculation.
+ * @last_temp_comp_time: last time temperature compensation is done.
+ * @i2c_dis: disable I2C interface or not.
+ * @name: name for the chip.
+ * @gyro_st_data: gyro self test data.
+ * @accel_st_data: accel self test data.
+ * @secondary_name: name for the slave device in the secondary I2C.
+ * @compass_var: compass variance from DMP.
+ * @current_compass_matrix: matrix applied to compass data before soft iron.
+ * @final_compass_matrix: matrix applied to compass data before soft iron.
+ * @trigger_state: information that which part triggers set_inv_enable.
+ * @firmware: firmware data pointer.
+ * @accel_calib_threshold: accel calibration threshold;
+ * @accel_calib_rate: divider for accel calibration rate.
+ * @accel_covariance[COVARIANCE_SIZE]: accel covariance data;
+ * @kf: kfifo for activity store.
+ * @activity_size: size for activity.
+ * @cntl: control word for sensor enable.
+ * @cntl2: control word for sensor extension.
+ * @motion_event_cntl: control word for events.
+ * @dmp_image_size: dmp image size.
+ * @dmp_start_address: start address of dmp.
+ * @step_counter_l_on: step counter android L sensor on/off.
+ * @step_counter_wake_l_on: step counter android L sensor wake on/off.
+ * @step_detector_l_on: step detector android L sensor on/off.
+ * @step_detector_wake_l_on: step detector android L sensor wake on/off.
+ * @gesture_only_on: indicate it is gesture only.
+ * @mag_divider: mag divider when gyro/accel is faster than mag maximum rate.
+ * @special_mag_mode: for 20690, there is special mag mode need to be handled.
+ * @mag_start_flag: when mag divider is non zero, need to check the start.
+ * @prev_steps: previous steps sent to the user.
+ * @aut_key_in: authentication key input.
+ * @aut_key_out: authentication key output.
+ * @suspend_state: state variable to indicate that we are in suspend state.
+ * @secondary_gyro_on: DMP out signal to turn on gyro.
+ * @secondary_mag_on: DMP out signal to turn on mag.
+ * @secondary_prox_on: DMP out signal to turn on proximity.
+ * @secondary_switch: showing this setup is triggered by secondary switch.
+ * @send_calib_gyro: flag to indicate to send calibrated gyro.
+ * @send_raw_compass: flag to send raw compass.
+ * @resume_state: flag to synchronize the processing of inv_read_fifo()
+ * @cycle_on: variable indicate accel cycle mode is on.
+ * @secondary_switch_data: secondary switch data for activity.
+ * @raw_gyro_data[6]: save raw gyro data.
+ * @raw_compass_data[3]: save raw compass data.
+ * @wait_queue: wait queue to wake up inv_read_fifo()
+ * @bac_drive_conf: bac drive configuration.
+ * @bac_walk_conf: bac walk configuration.
+ * @bac_smd_conf: bac smd configuration.
+ * @bac_bike_conf: bac bike configuration.
+ * @bac_run_conf: bac run configuration.
+ * @bac_still_conf: bac still configuration.
+ * @power_on_data: power on data.
+ * @fifo_data_store: store of FIFO data.
+ * @int_en: store interrupt enable register data.
+ * @int_en_2: store interrupt enable register 2 data.
+ * @gesture_int_count: interrupt count for gesture only mode.
+ * @smplrt_div: SMPLRT_DIV register value.
+ */
+struct inv_mpu_state {
+ struct device *dev;
+ int (*write)(struct inv_mpu_state *st, u8 reg, u8 data);
+ int (*read)(struct inv_mpu_state *st, u8 reg, int len, u8 *data);
+ int (*mem_write)(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+ u32 len, u8 const *data);
+ int (*mem_read)(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+ u32 len, u8 *data);
+ struct inv_chip_config_s chip_config;
+ struct inv_chip_info_s chip_info;
+ struct inv_smd smd;
+ struct inv_ped ped;
+ struct inv_eis eis;
+ struct inv_batch batch;
+ struct inv_temp_comp temp_comp;
+ struct inv_mpu_slave *slave_compass;
+ struct inv_mpu_slave *slave_pressure;
+ struct inv_mpu_slave *slave_als;
+ struct inv_secondary_reg slv_reg[4];
+ struct inv_timestamp_algo ts_algo;
+ struct inv_secondary_set sec_set;
+ struct inv_engine_info eng_info[ENGINE_NUM_MAX];
+ const struct inv_hw_s *hw;
+ enum inv_devices chip_type;
+ enum inv_bus_type bus_type;
+ enum inv_fifo_count_mode fifo_count_mode;
+#ifdef CONFIG_HAS_WAKELOCK
+ struct wake_lock wake_lock;
+#else
+ struct wakeup_source wake_lock;
+#endif
+#ifdef TIMER_BASED_BATCHING
+ struct hrtimer hr_batch_timer;
+ u64 batch_timeout;
+ bool is_batch_timer_running;
+ struct work_struct batch_work;
+#endif
+ struct i2c_client *client;
+ struct mpu_platform_data plat_data;
+ void *sl_handle;
+ struct inv_sensor sensor[SENSOR_NUM_MAX];
+ struct android_l_sensor sensor_l[SENSOR_L_NUM_MAX];
+ struct inv_sensor_accuracy sensor_accuracy[SENSOR_ACCURACY_NUM_MAX];
+ struct inv_ois ois;
+ bool sensor_acurracy_flag[SENSOR_ACCURACY_NUM_MAX];
+ short irq;
+ int accel_bias[3];
+ int gyro_bias[3];
+ int accel_st_bias[3];
+ int accel_ois_st_bias[3];
+ int gyro_st_bias[3];
+ int gyro_ois_st_bias[3];
+ int input_accel_dmp_bias[3];
+ int input_gyro_dmp_bias[3];
+ int input_compass_dmp_bias[3];
+ int input_accel_bias[3];
+ int input_gyro_bias[3];
+ u8 fifo_data[8];
+ u8 i2c_addr;
+ int header_count;
+ int step_det_count;
+ s32 gyro_sf;
+ u8 left_over[LEFT_OVER_BYTES];
+ u32 left_over_size;
+ u32 fifo_count;
+ bool wake_sensor_received;
+ bool accel_cal_enable;
+ bool gyro_cal_enable;
+ bool calib_compass_on;
+ bool debug_determine_engine_on;
+ bool poke_mode_on;
+ bool mode_1k_on;
+ u64 poke_ts;
+ u64 step_detector_base_ts;
+ u64 last_temp_comp_time;
+ u8 i2c_dis;
+ u8 name[20];
+ u8 gyro_st_data[3];
+ u8 accel_st_data[3];
+ u8 secondary_name[20];
+ s32 compass_var;
+ int current_compass_matrix[9];
+ int final_compass_matrix[9];
+ enum TRIGGER_STATE trigger_state;
+ u8 *firmware;
+ int accel_calib_threshold;
+ int accel_calib_rate;
+ u32 accel_covariance[COVARIANCE_SIZE];
+ DECLARE_KFIFO(kf, u8, 128);
+ u32 activity_size;
+ int wom_thld;
+ u16 cntl;
+ u16 cntl2;
+ u16 motion_event_cntl;
+ int dmp_image_size;
+ int dmp_start_address;
+ bool step_counter_l_on;
+ bool step_counter_wake_l_on;
+ bool step_detector_l_on;
+ bool step_detector_wake_l_on;
+ bool gesture_only_on;
+ bool mag_start_flag;
+ int mag_divider;
+ bool special_mag_mode;
+ int prev_steps;
+ u32 curr_steps;
+ int aut_key_in;
+ int aut_key_out;
+ bool secondary_gyro_on;
+ bool secondary_mag_on;
+ bool secondary_prox_on;
+ bool secondary_switch;
+ bool send_calib_gyro;
+ bool send_raw_compass;
+ bool send_raw_gyro;
+ bool resume_state;
+ bool cycle_on;
+ int secondary_switch_data;
+ u8 raw_gyro_data[6];
+ u32 raw_compass_data[3];
+ wait_queue_head_t wait_queue;
+ u32 bac_drive_conf;
+ u32 bac_walk_conf;
+ u32 bac_smd_conf;
+ u32 bac_bike_conf;
+ u32 bac_run_conf;
+ u32 bac_still_conf;
+ u32 power_on_data;
+ u8 fifo_data_store[HARDWARE_FIFO_SIZE + LEFT_OVER_BYTES];
+ u8 int_en;
+ u8 int_en_2;
+ u8 gesture_int_count;
+ u8 smplrt_div;
+};
+
+/**
+ * struct inv_mpu_slave - MPU slave structure.
+ * @st_upper: compass self test upper limit.
+ * @st_lower: compass self test lower limit.
+ * @scale: compass scale.
+ * @rate_scale: decide how fast a compass can read.
+ * @min_read_time: minimum time between each reading.
+ * @self_test: self test method of the slave.
+ * @set_scale: set scale of slave
+ * @get_scale: read scale back of the slave.
+ * @suspend: suspend operation.
+ * @resume: resume operation.
+ * @setup: setup chip. initialization.
+ * @combine_data: combine raw data into meaningful data.
+ * @read_data: read external sensor and output
+ * @get_mode: get current chip mode.
+ * @set_lpf: set low pass filter.
+ * @set_fs: set full scale
+ * @prev_ts: last time it is read.
+ */
+struct inv_mpu_slave {
+ const short *st_upper;
+ const short *st_lower;
+ int scale;
+ int rate_scale;
+ int min_read_time;
+ int (*self_test) (struct inv_mpu_state *);
+ int (*set_scale) (struct inv_mpu_state *, int scale);
+ int (*get_scale) (struct inv_mpu_state *, int *val);
+ int (*suspend) (struct inv_mpu_state *);
+ int (*resume) (struct inv_mpu_state *);
+ int (*setup) (struct inv_mpu_state *);
+ int (*combine_data) (u8 *in, short *out);
+ int (*read_data) (struct inv_mpu_state *, short *out);
+ int (*get_mode) (void);
+ int (*set_lpf) (struct inv_mpu_state *, int rate);
+ int (*set_fs) (struct inv_mpu_state *, int fs);
+ u64 prev_ts;
+};
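
A hypothetical slave descriptor, to illustrate how the callback table above is meant to be filled in (all names below are invented; editorial sketch, not part of the patch):

static int example_compass_setup(struct inv_mpu_state *st)
{
	/* chip-specific initialization would go here */
	return 0;
}

static struct inv_mpu_slave example_slave_compass = {
	.scale	= 1,
	.setup	= example_compass_setup,
	/* remaining callbacks (.self_test, .read_data, ...) filled as needed */
};
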
+
+/* scan element definition */
+enum inv_mpu_scan {
+ INV_MPU_SCAN_TIMESTAMP,
+};
+
+/* IIO attribute address */
+enum MPU_IIO_ATTR_ADDR {
+ ATTR_DMP_GYRO_X_DMP_BIAS,
+ ATTR_DMP_GYRO_Y_DMP_BIAS,
+ ATTR_DMP_GYRO_Z_DMP_BIAS,
+ ATTR_DMP_GYRO_CAL_ENABLE,
+ ATTR_DMP_ACCEL_X_DMP_BIAS,
+ ATTR_DMP_ACCEL_Y_DMP_BIAS,
+ ATTR_DMP_ACCEL_Z_DMP_BIAS,
+ ATTR_DMP_MAGN_X_DMP_BIAS,
+ ATTR_DMP_MAGN_Y_DMP_BIAS,
+ ATTR_DMP_MAGN_Z_DMP_BIAS,
+ ATTR_DMP_MAGN_ACCURACY,
+ ATTR_GYRO_X_OFFSET,
+ ATTR_GYRO_Y_OFFSET,
+ ATTR_GYRO_Z_OFFSET,
+ ATTR_ACCEL_X_OFFSET,
+ ATTR_ACCEL_Y_OFFSET,
+ ATTR_ACCEL_Z_OFFSET,
+ ATTR_DMP_SC_AUTH,
+ ATTR_DMP_EIS_AUTH,
+ ATTR_DMP_ACCEL_CAL_ENABLE,
+ ATTR_DMP_PED_INT_ON,
+ ATTR_DMP_PED_STEP_THRESH,
+ ATTR_DMP_PED_INT_THRESH,
+ ATTR_DMP_PED_ON,
+ ATTR_DMP_SMD_ENABLE,
+ ATTR_DMP_TILT_ENABLE,
+ ATTR_DMP_PICK_UP_ENABLE,
+ ATTR_DMP_EIS_ENABLE,
+ ATTR_DMP_PEDOMETER_STEPS,
+ ATTR_DMP_PEDOMETER_TIME,
+ ATTR_DMP_PEDOMETER_COUNTER,
+ ATTR_DMP_LOW_POWER_GYRO_ON,
+ ATTR_DMP_LP_EN_OFF,
+ ATTR_DMP_CLK_SEL,
+ ATTR_DMP_DEBUG_MEM_READ,
+ ATTR_DMP_DEBUG_MEM_WRITE,
+ ATTR_DEBUG_REG_WRITE,
+ ATTR_DEBUG_WRITE_CFG,
+ ATTR_DEBUG_REG_ADDR,
+ ATTR_WOM_THLD,
+ /* *****above this line, are DMP features, power needs on/off */
+ /* *****below this line, are DMP features, no power needed */
+ ATTR_IN_POWER_ON,
+ ATTR_DMP_ON,
+ ATTR_DMP_EVENT_INT_ON,
+ ATTR_DMP_STEP_COUNTER_ON,
+ ATTR_DMP_STEP_COUNTER_WAKE_ON,
+ ATTR_DMP_BATCHMODE_TIMEOUT,
+ ATTR_DMP_BATCHMODE_WAKE_FIFO_FULL,
+ ATTR_DMP_STEP_DETECTOR_ON,
+ ATTR_DMP_STEP_DETECTOR_WAKE_ON,
+ ATTR_DMP_ACTIVITY_ON,
+ ATTR_DMP_IN_ANGLVEL_ACCURACY_ENABLE,
+ ATTR_DMP_IN_ACCEL_ACCURACY_ENABLE,
+ ATTR_DMP_DEBUG_DETERMINE_ENGINE_ON,
+ ATTR_DMP_MISC_GYRO_RECALIBRATION,
+ ATTR_DMP_MISC_ACCEL_RECALIBRATION,
+ ATTR_DMP_PARAMS_ACCEL_CALIBRATION_THRESHOLD,
+ ATTR_DMP_PARAMS_ACCEL_CALIBRATION_RATE,
+ ATTR_GYRO_SCALE,
+ ATTR_ACCEL_SCALE,
+ ATTR_COMPASS_SCALE,
+ ATTR_COMPASS_SENSITIVITY_X,
+ ATTR_COMPASS_SENSITIVITY_Y,
+ ATTR_COMPASS_SENSITIVITY_Z,
+ ATTR_GYRO_ENABLE,
+ ATTR_ACCEL_ENABLE,
+ ATTR_COMPASS_ENABLE,
+ ATTR_FIRMWARE_LOADED,
+ ATTR_POKE_MODE,
+ ATTR_ANGLVEL_X_CALIBBIAS,
+ ATTR_ANGLVEL_Y_CALIBBIAS,
+ ATTR_ANGLVEL_Z_CALIBBIAS,
+ ATTR_ACCEL_X_CALIBBIAS,
+ ATTR_ACCEL_Y_CALIBBIAS,
+ ATTR_ACCEL_Z_CALIBBIAS,
+ ATTR_ANGLVEL_X_ST_CALIBBIAS,
+ ATTR_ANGLVEL_Y_ST_CALIBBIAS,
+ ATTR_ANGLVEL_Z_ST_CALIBBIAS,
+ ATTR_ANGLVEL_X_OIS_ST_CALIBBIAS,
+ ATTR_ANGLVEL_Y_OIS_ST_CALIBBIAS,
+ ATTR_ANGLVEL_Z_OIS_ST_CALIBBIAS,
+ ATTR_ACCEL_X_ST_CALIBBIAS,
+ ATTR_ACCEL_Y_ST_CALIBBIAS,
+ ATTR_ACCEL_Z_ST_CALIBBIAS,
+ ATTR_ACCEL_X_OIS_ST_CALIBBIAS,
+ ATTR_ACCEL_Y_OIS_ST_CALIBBIAS,
+ ATTR_ACCEL_Z_OIS_ST_CALIBBIAS,
+ ATTR_GYRO_MATRIX,
+ ATTR_ACCEL_MATRIX,
+ ATTR_COMPASS_MATRIX,
+ ATTR_FSYNC_FRAME_COUNT,
+ ATTR_SECONDARY_NAME,
+ ATTR_GYRO_SF,
+ ATTR_BAC_DRIVE_CONFIDENCE,
+ ATTR_BAC_WALK_CONFIDENCE,
+ ATTR_BAC_SMD_CONFIDENCE,
+ ATTR_BAC_BIKE_CONFIDENCE,
+ ATTR_BAC_STILL_CONFIDENCE,
+ ATTR_BAC_RUN_CONFIDENCE,
+ IN_OIS_ACCEL_FS,
+ IN_OIS_GYRO_FS,
+ IN_OIS_ENABLE,
+};
+
+int inv_mpu_configure_ring(struct iio_dev *indio_dev);
+int inv_mpu_probe_trigger(struct iio_dev *indio_dev);
+void inv_mpu_unconfigure_ring(struct iio_dev *indio_dev);
+void inv_mpu_remove_trigger(struct iio_dev *indio_dev);
+#ifdef CONFIG_PM_SLEEP
+int inv_mpu_suspend(struct iio_dev *indio_dev);
+void inv_mpu_complete(struct iio_dev *indio_dev);
+#endif
+
+int inv_get_pedometer_steps(struct inv_mpu_state *st, int *ped);
+int inv_get_pedometer_time(struct inv_mpu_state *st, int *ped);
+int inv_read_pedometer_counter(struct inv_mpu_state *st);
+
+int inv_dmp_read(struct inv_mpu_state *st, int off, int size, u8 *buf);
+int inv_firmware_load(struct inv_mpu_state *st);
+
+int set_inv_enable(struct iio_dev *indio_dev);
+
+int inv_mpu_setup_compass_slave(struct inv_mpu_state *st);
+int inv_mpu_setup_pressure_slave(struct inv_mpu_state *st);
+int inv_mpu_setup_als_slave(struct inv_mpu_state *st);
+int inv_mpu_initialize(struct inv_mpu_state *st);
+int inv_set_accel_sf(struct inv_mpu_state *st);
+int inv_set_gyro_sf(struct inv_mpu_state *st);
+s64 get_time_ns(void);
+int inv_i2c_read_base(struct inv_mpu_state *st, u16 i, u8 r, u16 l, u8 *d);
+int inv_i2c_single_write_base(struct inv_mpu_state *st, u16 i, u8 r, u8 d);
+int write_be32_to_mem(struct inv_mpu_state *st, u32 data, int addr);
+int write_be16_to_mem(struct inv_mpu_state *st, u16 data, int addr);
+int read_be32_from_mem(struct inv_mpu_state *st, u32 *o, int addr);
+int read_be16_from_mem(struct inv_mpu_state *st, u16 *o, int addr);
+u32 inv_get_cntr_diff(u32 curr_counter, u32 prev);
+int inv_write_2bytes(struct inv_mpu_state *st, int k, int data);
+int inv_set_bank(struct inv_mpu_state *st, u8 bank);
+int inv_set_power(struct inv_mpu_state *st, bool power_on);
+int inv_switch_power_in_lp(struct inv_mpu_state *st, bool on);
+#ifndef CONFIG_INV_MPU_IIO_ICM20608D
+int inv_set_accel_config2(struct inv_mpu_state *st, bool cycle_mode);
+#endif
+int inv_stop_dmp(struct inv_mpu_state *st);
+int inv_reset_fifo(struct inv_mpu_state *st, bool turn_off);
+int inv_create_dmp_sysfs(struct iio_dev *ind);
+int inv_check_chip_type(struct iio_dev *indio_dev, const char *name);
+int inv_write_compass_matrix(struct inv_mpu_state *st, int *adj);
+irqreturn_t inv_read_fifo(int irq, void *dev_id);
+#ifdef TIMER_BASED_BATCHING
+void inv_batch_work(struct work_struct *work);
+#endif
+int inv_flush_batch_data(struct iio_dev *indio_dev, int data);
+static inline int mpu_memory_write(struct inv_mpu_state *st, u8 mpu_addr,
+ u16 mem_addr, u32 len, u8 const *data)
+{
+ int ret = -1;
+
+ if (st->mem_write)
+ ret = st->mem_write(st, mpu_addr, mem_addr, len, data);
+
+ return ret;
+}
+static inline int mpu_memory_read(struct inv_mpu_state *st, u8 mpu_addr,
+ u16 mem_addr, u32 len, u8 *data)
+{
+ int ret = -1;
+
+ if (st->mem_read)
+ ret = st->mem_read(st, mpu_addr, mem_addr, len, data);
+
+ return ret;
+}
+int inv_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int len);
+int inv_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int v);
+int inv_execute_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int v);
+int inv_execute_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+ int reg, int len, u8 *d);
+
+int inv_push_16bytes_buffer(struct inv_mpu_state *st, u16 hdr,
+ u64 t, int *q, s16 accur);
+int inv_push_gyro_data(struct inv_mpu_state *st, s16 *raw, s32 *calib, u64 t);
+int inv_push_8bytes_buffer(struct inv_mpu_state *st, u16 hdr, u64 t, s16 *d);
+int inv_push_8bytes_kf(struct inv_mpu_state *st, u16 hdr, u64 t, s16 *d);
+
+void inv_push_step_indicator(struct inv_mpu_state *st, u64 t);
+int inv_send_steps(struct inv_mpu_state *st, int step, u64 t);
+int inv_push_marker_to_buffer(struct inv_mpu_state *st, u16 hdr, int data);
+
+int inv_check_sensor_on(struct inv_mpu_state *st);
+int inv_write_cntl(struct inv_mpu_state *st, u16 wd, bool en, int cntl);
+
+int inv_get_packet_size(struct inv_mpu_state *st, u16 hdr,
+ u32 *pk_size, u8 *dptr);
+int inv_parse_packet(struct inv_mpu_state *st, u16 hdr, u8 *dptr);
+int inv_pre_parse_packet(struct inv_mpu_state *st, u16 hdr, u8 *dptr);
+int inv_process_dmp_data(struct inv_mpu_state *st);
+
+int be32_to_int(u8 *d);
+void inv_convert_and_push_16bytes(struct inv_mpu_state *st, u16 hdr,
+ u8 *d, u64 t, s8 *m);
+void inv_convert_and_push_8bytes(struct inv_mpu_state *st, u16 hdr,
+ u8 *d, u64 t, s8 *m);
+int inv_get_dmp_ts(struct inv_mpu_state *st, int i);
+int inv_process_step_det(struct inv_mpu_state *st, u8 *dptr);
+int inv_process_eis(struct inv_mpu_state *st, u16 delay);
+int inv_rate_convert(struct inv_mpu_state *st, int ind, int data);
+
+int inv_setup_dmp_firmware(struct inv_mpu_state *st);
+/* used to print i2c data using pr_debug */
+char *wr_pr_debug_begin(u8 const *data, u32 len, char *string);
+char *wr_pr_debug_end(char *string);
+
+int inv_hw_self_test(struct inv_mpu_state *st);
+int inv_q30_mult(int a, int b);
+#ifdef ACCEL_BIAS_TEST
+int inv_get_3axis_average(s16 src[], s16 dst[], s16 reset);
+#endif
+
+static inline int inv_plat_single_write(struct inv_mpu_state *st,
+ u8 reg, u8 data)
+{
+ int ret = -1;
+
+ if (st->write)
+ ret = st->write(st, reg, data);
+
+ return ret;
+}
+static inline int inv_plat_read(struct inv_mpu_state *st, u8 reg,
+ int len, u8 *data)
+{
+ int ret = -1;
+
+ if (st->read)
+ ret = st->read(st, reg, len, data);
+
+ return ret;
+}
+
+int inv_stop_interrupt(struct inv_mpu_state *st);
+int inv_reenable_interrupt(struct inv_mpu_state *st);
+
+int inv_enable_pedometer_interrupt(struct inv_mpu_state *st, bool en);
+int inv_dataout_control1(struct inv_mpu_state *st, u16 cntl1);
+int inv_dataout_control2(struct inv_mpu_state *st, u16 cntl2);
+int inv_motion_interrupt_control(struct inv_mpu_state *st,
+ u16 motion_event_cntl);
+
+int inv_bound_timestamp(struct inv_mpu_state *st);
+int inv_update_dmp_ts(struct inv_mpu_state *st, int ind);
+int inv_get_last_run_time_non_dmp_record_mode(struct inv_mpu_state *st);
+
+#define mem_w(a, b, c) mpu_memory_write(st, st->i2c_addr, a, b, c)
+#define mem_r(a, b, c) mpu_memory_read(st, st->i2c_addr, a, b, c)
+
+#endif /* #ifndef _INV_MPU_IIO_H_ */
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu/inv_mpu_ring.c
new file mode 100644
index 000000000000..3e5bccbea0df
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_ring.c
@@ -0,0 +1,643 @@
+/*
+* Copyright (C) 2012-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/math64.h>
+#include <linux/miscdevice.h>
+
+#include "inv_mpu_iio.h"
+
+static void inv_push_timestamp(struct iio_dev *indio_dev, u64 t)
+{
+ u8 buf[IIO_BUFFER_BYTES];
+ struct inv_mpu_state *st;
+
+ st = iio_priv(indio_dev);
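+ /* in poke mode, push the injected poke_ts instead of the computed
+ * timestamp */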
+ if (st->poke_mode_on)
+ memcpy(buf, &st->poke_ts, sizeof(t));
+ else
+ memcpy(buf, &t, sizeof(t));
+ iio_push_to_buffers(indio_dev, buf);
+}
+
+int inv_push_marker_to_buffer(struct inv_mpu_state *st, u16 hdr, int data)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ u8 buf[IIO_BUFFER_BYTES];
+
+ memcpy(buf, &hdr, sizeof(hdr));
+ memcpy(&buf[4], &data, sizeof(data));
+ iio_push_to_buffers(indio_dev, buf);
+
+ return 0;
+}
+static int inv_calc_precision(struct inv_mpu_state *st)
+{
+ int diff;
+ int init;
+
+ if (st->eis.voting_state != 8)
+ return 0;
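+ /* estimate nanoseconds per fsync delay count from two consecutive
+ * frames; accept only if within ~12.5% of 1 us, else restart voting */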
+ diff = abs(st->eis.fsync_delay_s[1] - st->eis.fsync_delay_s[0]);
+ init = 0;
+ if (diff)
+ init = st->sensor[SENSOR_GYRO].dur / diff;
+
+ if (abs(init - NSEC_PER_USEC) < (NSEC_PER_USEC >> 3))
+ st->eis.count_precision = init;
+ else
+ st->eis.voting_state = 0;
+
+ pr_debug("dur= %d prc= %d\n", st->sensor[SENSOR_GYRO].dur,
+ st->eis.count_precision);
+
+ return 0;
+}
+
+static s64 calc_frame_ave(struct inv_mpu_state *st, int delay)
+{
+ s64 ts;
+
+ ts = st->eis.current_timestamp - delay;
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || defined(CONFIG_INV_MPU_IIO_ICM20690)
+ ts -= st->ts_algo.gyro_ts_shift;
+#endif
+ pr_debug("shift= %d ts = %lld\n", st->ts_algo.gyro_ts_shift, ts);
+
+ return ts;
+}
+
+static void inv_push_eis_ring(struct inv_mpu_state *st, int *q, bool sync,
+ s64 t)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ struct inv_eis *eis = &st->eis;
+ u8 buf[IIO_BUFFER_BYTES];
+ int tmp, ii;
+
+ buf[0] = (EIS_GYRO_HDR & 0xff);
+ buf[1] = (EIS_GYRO_HDR >> 8);
+ memcpy(buf + 4, &q[0], sizeof(q[0]));
+ iio_push_to_buffers(indio_dev, buf);
+ for (ii = 0; ii < 2; ii++)
+ memcpy(buf + 4 * ii, &q[ii + 1], sizeof(q[ii]));
+ iio_push_to_buffers(indio_dev, buf);
+ tmp = eis->frame_count;
+ if (sync)
+ tmp |= 0x80000000;
+ memcpy(buf, &tmp, sizeof(tmp));
+ iio_push_to_buffers(indio_dev, buf);
+ inv_push_timestamp(indio_dev, t);
+}
+static int inv_do_interpolation_gyro(struct inv_mpu_state *st, int *prev,
+ s64 prev_t, int *curr, s64 curr_t, s64 t, bool trigger)
+{
+ int i;
+ int out[3];
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || defined(CONFIG_INV_MPU_IIO_ICM20690)
+ prev_t -= st->ts_algo.gyro_ts_shift;
+ prev_t += MPU_4X_TS_GYRO_SHIFT;
+ curr_t -= st->ts_algo.gyro_ts_shift;
+ curr_t += MPU_4X_TS_GYRO_SHIFT;
+#endif
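+ /* linearly interpolate gyro data at time t when it falls between the
+ * previous and current samples; otherwise clamp to the nearer sample */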
+ if ((t > prev_t) && (t < curr_t)) {
+ for (i = 0; i < 3; i++)
+ out[i] = (int)div_s64((s64)(curr[i] - prev[i]) *
+ (s64)(t - prev_t), curr_t - prev_t) + prev[i];
+ } else if (t < prev_t) {
+ for (i = 0; i < 3; i++)
+ out[i] = prev[i];
+ } else {
+ for (i = 0; i < 3; i++)
+ out[i] = curr[i];
+ }
+ pr_debug("prev= %lld t = %lld curr= %lld\n", prev_t, t, curr_t);
+ pr_debug("prev = %d, %d, %d\n", prev[0], prev[1], prev[2]);
+ pr_debug("curr = %d, %d, %d\n", curr[0], curr[1], curr[2]);
+ pr_debug("out = %d, %d, %d\n", out[0], out[1], out[2]);
+ inv_push_eis_ring(st, out, trigger, t);
+
+ return 0;
+}
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || defined(CONFIG_INV_MPU_IIO_ICM20690)
+static void inv_handle_triggered_eis(struct inv_mpu_state *st)
+{
+ struct inv_eis *eis = &st->eis;
+ int delay;
+
+ if (st->eis.eis_frame) {
+ inv_calc_precision(st);
+ delay = ((int)st->eis.fsync_delay) * st->eis.count_precision;
+ eis->fsync_timestamp = calc_frame_ave(st, delay);
+ inv_do_interpolation_gyro(st,
+ st->eis.prev_gyro, st->eis.prev_timestamp,
+ st->eis.current_gyro, st->eis.current_timestamp,
+ eis->fsync_timestamp, true);
+ pr_debug("fsync=%lld, curr=%lld, delay=%d\n",
+ eis->fsync_timestamp, eis->current_timestamp, delay);
+ inv_push_eis_ring(st, st->eis.current_gyro, false,
+ st->eis.current_timestamp - st->ts_algo.gyro_ts_shift
+ + MPU_4X_TS_GYRO_SHIFT);
+ eis->last_fsync_timestamp = eis->fsync_timestamp;
+ } else {
+ pr_debug("cur= %lld\n", st->eis.current_timestamp);
+ inv_push_eis_ring(st, st->eis.current_gyro, false,
+ st->eis.current_timestamp - st->ts_algo.gyro_ts_shift
+ + MPU_4X_TS_GYRO_SHIFT);
+ }
+}
+#else
+static void inv_handle_triggered_eis(struct inv_mpu_state *st)
+{
+ struct inv_eis *eis = &st->eis;
+ int delay;
+
+ if ((st->eis.eis_frame && (st->eis.fsync_delay != 5)) ||
+ (st->eis.eis_frame && (st->eis.fsync_delay == 5) &&
+ !st->eis.current_sync)) {
+ inv_calc_precision(st);
+ delay = ((int)st->eis.fsync_delay) * st->eis.count_precision;
+ eis->fsync_timestamp = calc_frame_ave(st, delay);
+ inv_do_interpolation_gyro(st,
+ st->eis.prev_gyro, st->eis.prev_timestamp,
+ st->eis.current_gyro, st->eis.current_timestamp,
+ eis->fsync_timestamp, true);
+ pr_debug("fsync=%lld, curr=%lld, delay=%d\n",
+ eis->fsync_timestamp, eis->current_timestamp, delay);
+ inv_push_eis_ring(st, st->eis.current_gyro, false,
+ st->eis.current_timestamp);
+ eis->last_fsync_timestamp = eis->fsync_timestamp;
+ st->eis.eis_frame = false;
+ } else {
+ st->eis.current_sync = false;
+ pr_debug("cur= %lld\n", st->eis.current_timestamp);
+ inv_push_eis_ring(st, st->eis.current_gyro, false,
+ st->eis.current_timestamp);
+ }
+}
+#endif
+static void inv_push_eis_buffer(struct inv_mpu_state *st, u64 t, int *q)
+{
+ int ii;
+
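+ /* once EIS is triggered, keep the previous and current gyro samples so
+ * that a fsync event falling between them can be interpolated */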
+ if (st->eis.eis_triggered) {
+ for (ii = 0; ii < 3; ii++)
+ st->eis.prev_gyro[ii] = st->eis.current_gyro[ii];
+ st->eis.prev_timestamp = st->eis.current_timestamp;
+
+ for (ii = 0; ii < 3; ii++)
+ st->eis.current_gyro[ii] = q[ii];
+ st->eis.current_timestamp = t;
+ inv_handle_triggered_eis(st);
+ } else {
+ for (ii = 0; ii < 3; ii++)
+ st->eis.current_gyro[ii] = q[ii];
+ st->eis.current_timestamp = t;
+ }
+}
+static int inv_push_16bytes_final(struct inv_mpu_state *st, int j,
+ s32 *q, u64 t, s16 accur)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ u8 buf[IIO_BUFFER_BYTES];
+ int ii;
+
+ memcpy(buf, &st->sensor_l[j].header, sizeof(st->sensor_l[j].header));
+ memcpy(buf + 2, &accur, sizeof(accur));
+ memcpy(buf + 4, &q[0], sizeof(q[0]));
+ iio_push_to_buffers(indio_dev, buf);
+ for (ii = 0; ii < 2; ii++)
+ memcpy(buf + 4 * ii, &q[ii + 1], sizeof(q[ii]));
+ iio_push_to_buffers(indio_dev, buf);
+ inv_push_timestamp(indio_dev, t);
+ st->sensor_l[j].counter = 0;
+ if (st->sensor_l[j].wake_on)
+ st->wake_sensor_received = true;
+
+ return 0;
+}
+int inv_push_16bytes_buffer(struct inv_mpu_state *st, u16 sensor,
+ u64 t, int *q, s16 accur)
+{
+ int j;
+
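+ /* fan the data out to every enabled logical sensor that shares this
+ * physical base sensor, decimating by each sensor's divider */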
+ for (j = 0; j < SENSOR_L_NUM_MAX; j++) {
+ if (st->sensor_l[j].on && (st->sensor_l[j].base == sensor)) {
+ st->sensor_l[j].counter++;
+ if ((st->sensor_l[j].div != 0xffff) &&
+ (st->sensor_l[j].counter >=
+ st->sensor_l[j].div)) {
+ pr_debug(
+ "Sensor_l = %d sensor = %d header [%04X] div [%d] ts [%lld] %d %d %d\n",
+ j, sensor,
+ st->sensor_l[j].header,
+ st->sensor_l[j].div,
+ t, q[0], q[1], q[2]);
+ inv_push_16bytes_final(st, j, q, t, accur);
+ }
+ }
+ }
+ return 0;
+}
+
+void inv_convert_and_push_16bytes(struct inv_mpu_state *st, u16 hdr,
+ u8 *d, u64 t, s8 *m)
+{
+ int i, j;
+ s32 in[3], out[3];
+
+ for (i = 0; i < 3; i++)
+ in[i] = be32_to_int(d + i * 4);
+ /* multiply by the orientation matrix; zero entries are skipped */
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 3; j++)
+ if (m[i * 3 + j])
+ out[i] = in[j] * m[i * 3 + j];
+
+ inv_push_16bytes_buffer(st, hdr, t, out, 0);
+}
+
+void inv_convert_and_push_8bytes(struct inv_mpu_state *st, u16 hdr,
+ u8 *d, u64 t, s8 *m)
+{
+ int i, j;
+ s16 in[3], out[3];
+
+ for (i = 0; i < 3; i++)
+ in[i] = be16_to_cpup((__be16 *) (d + i * 2));
+
+ /* multiply by the orientation matrix; zero entries are skipped */
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 3; j++)
+ if (m[i * 3 + j])
+ out[i] = in[j] * m[i * 3 + j];
+
+ inv_push_8bytes_buffer(st, hdr, t, out);
+}
+
+int inv_push_special_8bytes_buffer(struct inv_mpu_state *st,
+ u16 hdr, u64 t, s16 *d)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ u8 buf[IIO_BUFFER_BYTES];
+ int j;
+
+ memcpy(buf, &hdr, sizeof(hdr));
+ memcpy(&buf[2], &d[0], sizeof(d[0]));
+ for (j = 0; j < 2; j++)
+ memcpy(&buf[4 + j * 2], &d[j + 1], sizeof(d[j]));
+ iio_push_to_buffers(indio_dev, buf);
+ inv_push_timestamp(indio_dev, t);
+
+ return 0;
+}
+
+static int inv_s16_gyro_push(struct inv_mpu_state *st, int i, s16 *raw, u64 t)
+{
+ if (st->sensor_l[i].on) {
+ st->sensor_l[i].counter++;
+ if ((st->sensor_l[i].div != 0xffff) &&
+ (st->sensor_l[i].counter >= st->sensor_l[i].div)) {
+ inv_push_special_8bytes_buffer(st,
+ st->sensor_l[i].header, t, raw);
+ st->sensor_l[i].counter = 0;
+ if (st->sensor_l[i].wake_on)
+ st->wake_sensor_received = true;
+ }
+ }
+
+ return 0;
+}
+
+static int inv_s32_gyro_push(struct inv_mpu_state *st, int i, s32 *calib, u64 t)
+{
+ if (st->sensor_l[i].on) {
+ st->sensor_l[i].counter++;
+ if ((st->sensor_l[i].div != 0xffff) &&
+ (st->sensor_l[i].counter >= st->sensor_l[i].div)) {
+ inv_push_16bytes_final(st, i, calib, t, 0);
+ st->sensor_l[i].counter = 0;
+ if (st->sensor_l[i].wake_on)
+ st->wake_sensor_received = true;
+ }
+ }
+
+ return 0;
+}
+
+int inv_push_gyro_data(struct inv_mpu_state *st, s16 *raw, s32 *calib, u64 t)
+{
+ int gyro_data[] = {SENSOR_L_GYRO, SENSOR_L_GYRO_WAKE};
+ int calib_data[] = {SENSOR_L_GYRO_CAL, SENSOR_L_GYRO_CAL_WAKE};
+ int i;
+
+ if (st->sensor_l[SENSOR_L_EIS_GYRO].on)
+ inv_push_eis_buffer(st, t, calib);
+
+ for (i = 0; i < 2; i++)
+ inv_s16_gyro_push(st, gyro_data[i], raw, t);
+ for (i = 0; i < 2; i++)
+ inv_s32_gyro_push(st, calib_data[i], calib, t);
+
+ return 0;
+}
+int inv_push_8bytes_buffer(struct inv_mpu_state *st, u16 sensor, u64 t, s16 *d)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ u8 buf[IIO_BUFFER_BYTES];
+ int ii, j;
+
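+ /* step detector samples bypass the per-sensor divider and are pushed
+ * immediately; all other 8-byte sensors are decimated per logical
+ * sensor below */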
+ if ((sensor == STEP_DETECTOR_HDR) ||
+ (sensor == STEP_DETECTOR_WAKE_HDR)) {
+ memcpy(buf, &sensor, sizeof(sensor));
+ memcpy(&buf[2], &d[0], sizeof(d[0]));
+ for (j = 0; j < 2; j++)
+ memcpy(&buf[4 + j * 2], &d[j + 1], sizeof(d[j]));
+ iio_push_to_buffers(indio_dev, buf);
+ inv_push_timestamp(indio_dev, t);
+ if (sensor == STEP_DETECTOR_WAKE_HDR)
+ st->wake_sensor_received = true;
+ return 0;
+ }
+ for (ii = 0; ii < SENSOR_L_NUM_MAX; ii++) {
+ if (st->sensor_l[ii].on &&
+ (st->sensor_l[ii].base == sensor) &&
+ (st->sensor_l[ii].div != 0xffff)) {
+ st->sensor_l[ii].counter++;
+ if (st->sensor_l[ii].counter >= st->sensor_l[ii].div) {
+ pr_debug(
+ "Sensor_l = %d sensor = %d header [%04X] div [%d] ts [%lld] %d %d %d\n",
+ ii, sensor, st->sensor_l[ii].header,
+ st->sensor_l[ii].div, t, d[0], d[1], d[2]);
+
+ memcpy(buf, &st->sensor_l[ii].header,
+ sizeof(st->sensor_l[ii].header));
+ memcpy(&buf[2], &d[0], sizeof(d[0]));
+ for (j = 0; j < 2; j++)
+ memcpy(&buf[4 + j * 2], &d[j + 1],
+ sizeof(d[j]));
+
+ iio_push_to_buffers(indio_dev, buf);
+ inv_push_timestamp(indio_dev, t);
+ st->sensor_l[ii].counter = 0;
+ if (st->sensor_l[ii].wake_on)
+ st->wake_sensor_received = true;
+ }
+ }
+ }
+
+ return 0;
+}
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+/* Implemented activity to string function for BAC test */
+#define TILT_DETECTED 0x1000
+#define NONE 0x00
+#define DRIVE 0x01
+#define WALK 0x02
+#define RUN 0x04
+#define BIKE 0x08
+#define TILT 0x10
+#define STILL 0x20
+#define DRIVE_WALK (DRIVE | WALK)
+#define DRIVE_RUN (DRIVE | RUN)
+
+char *act_string(s16 data)
+{
+ data &= (~TILT);
+ switch (data) {
+ case NONE:
+ return "None";
+ case DRIVE:
+ return "Drive";
+ case WALK:
+ return "Walk";
+ case RUN:
+ return "Run";
+ case BIKE:
+ return "Bike";
+ case STILL:
+ return "Still";
+ case DRIVE_WALK:
+ return "drive and walk";
+ case DRIVE_RUN:
+ return "drive and run";
+ default:
+ return "Unknown";
+ }
+ return "Unknown";
+}
+
+char *inv_tilt_check(s16 data)
+{
+ if (data & TILT)
+ return "Tilt";
+ else
+ return "None";
+}
+
+int inv_push_8bytes_kf(struct inv_mpu_state *st, u16 hdr, u64 t, s16 *d)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ u8 buf[IIO_BUFFER_BYTES];
+ int i;
+
+ if (st->chip_config.activity_on) {
+ memcpy(buf, &hdr, sizeof(hdr));
+ for (i = 0; i < 3; i++)
+ memcpy(&buf[2 + i * 2], &d[i], sizeof(d[i]));
+
+ kfifo_in(&st->kf, buf, IIO_BUFFER_BYTES);
+ memcpy(buf, &t, sizeof(t));
+ kfifo_in(&st->kf, buf, IIO_BUFFER_BYTES);
+ st->activity_size += IIO_BUFFER_BYTES * 2;
+ }
+ if (st->chip_config.tilt_enable) {
+ pr_debug("d[0] = %04X, [%X : %s] to [%X : %s]",
+ d[0], d[0] & 0x00FF,
+ inv_tilt_check(d[0] & 0x00FF),
+ (d[0] & 0xFF00) >> 8, inv_tilt_check((d[0] & 0xFF00) >> 8));
+ sysfs_notify(&indio_dev->dev.kobj, NULL, "poll_tilt");
+ }
+
+ pr_debug("d[0] = %04X, [%X : %s] to [%X : %s]", d[0], d[0] & 0x00FF,
+ act_string(d[0] & 0x00FF),
+ (d[0] & 0xFF00) >> 8, act_string((d[0] & 0xFF00) >> 8));
+
+ read_be32_from_mem(st, &st->bac_drive_conf, BAC_DRIVE_CONFIDENCE);
+ read_be32_from_mem(st, &st->bac_walk_conf, BAC_WALK_CONFIDENCE);
+ read_be32_from_mem(st, &st->bac_smd_conf, BAC_SMD_CONFIDENCE);
+ read_be32_from_mem(st, &st->bac_bike_conf, BAC_BIKE_CONFIDENCE);
+ read_be32_from_mem(st, &st->bac_still_conf, BAC_STILL_CONFIDENCE);
+ read_be32_from_mem(st, &st->bac_run_conf, BAC_RUN_CONFIDENCE);
+
+ return 0;
+}
+#endif
+
+int inv_send_steps(struct inv_mpu_state *st, int step, u64 ts)
+{
+ s16 s[3];
+
+ s[0] = 0;
+ s[1] = (s16) (step & 0xffff);
+ s[2] = (s16) ((step >> 16) & 0xffff);
+ if (st->step_counter_l_on)
+ inv_push_special_8bytes_buffer(st, STEP_COUNTER_HDR, ts, s);
+ if (st->step_counter_wake_l_on) {
+ inv_push_special_8bytes_buffer(st, STEP_COUNTER_WAKE_HDR,
+ ts, s);
+ st->wake_sensor_received = true;
+ }
+ return 0;
+}
+
+void inv_push_step_indicator(struct inv_mpu_state *st, u64 t)
+{
+ s16 sen[3];
+#define STEP_INDICATOR_HEADER 0x0001
+
+ sen[0] = 0;
+ sen[1] = 0;
+ sen[2] = 0;
+ inv_push_8bytes_buffer(st, STEP_INDICATOR_HEADER, t, sen);
+}
+
+/*
+ * inv_irq_handler() - hard IRQ handler for data ready; all work is done
+ * in the threaded handler (inv_read_fifo).
+ */
+static irqreturn_t inv_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+#ifdef TIMER_BASED_BATCHING
+static enum hrtimer_restart inv_batch_timer_handler(struct hrtimer *timer)
+{
+ struct inv_mpu_state *st =
+ container_of(timer, struct inv_mpu_state, hr_batch_timer);
+
+ if (st->chip_config.gyro_enable || st->chip_config.accel_enable) {
+ hrtimer_forward_now(&st->hr_batch_timer,
+ ns_to_ktime(st->batch_timeout));
+ schedule_work(&st->batch_work);
+ return HRTIMER_RESTART;
+ }
+ st->is_batch_timer_running = 0;
+ return HRTIMER_NORESTART;
+}
+#endif
+
+void inv_mpu_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+#ifdef KERNEL_VERSION_4_X
+ devm_free_irq(st->dev, st->irq, st);
+ devm_iio_kfifo_free(st->dev, indio_dev->buffer);
+#else
+ free_irq(st->irq, st);
+ iio_kfifo_free(indio_dev->buffer);
+#endif
+}
+EXPORT_SYMBOL_GPL(inv_mpu_unconfigure_ring);
+
+#ifndef KERNEL_VERSION_4_X
+static int inv_predisable(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static int inv_preenable(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops inv_mpu_ring_setup_ops = {
+ .preenable = &inv_preenable,
+ .predisable = &inv_predisable,
+};
+#endif
+
+int inv_mpu_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ struct iio_buffer *ring;
+
+#ifdef TIMER_BASED_BATCHING
+ /* configure hrtimer */
+ hrtimer_init(&st->hr_batch_timer, CLOCK_BOOTTIME, HRTIMER_MODE_REL);
+ st->hr_batch_timer.function = inv_batch_timer_handler;
+ INIT_WORK(&st->batch_work, inv_batch_work);
+#endif
+#ifdef KERNEL_VERSION_4_X
+ ring = devm_iio_kfifo_allocate(st->dev);
+ if (!ring)
+ return -ENOMEM;
+ ring->scan_timestamp = true;
+ iio_device_attach_buffer(indio_dev, ring);
+ ret = devm_request_threaded_irq(st->dev,
+ st->irq,
+ inv_irq_handler,
+ inv_read_fifo,
+ IRQF_TRIGGER_RISING | IRQF_SHARED,
+ "inv_irq",
+ st);
+ if (ret) {
+ devm_iio_kfifo_free(st->dev, ring);
+ return ret;
+ }
+
+ /* this mode does not use ops */
+ indio_dev->modes = INDIO_ALL_BUFFER_MODES;
+
+ return ret;
+#else
+ ring = iio_kfifo_allocate(indio_dev);
+ if (!ring)
+ return -ENOMEM;
+ indio_dev->buffer = ring;
+ /* setup ring buffer */
+ ring->scan_timestamp = true;
+ indio_dev->setup_ops = &inv_mpu_ring_setup_ops;
+ ret = request_threaded_irq(st->irq,
+ inv_irq_handler,
+ inv_read_fifo,
+ IRQF_TRIGGER_RISING | IRQF_SHARED,
+ "inv_irq",
+ st);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ return 0;
+error_iio_sw_rb_free:
+ iio_kfifo_free(indio_dev->buffer);
+
+ return ret;
+#endif
+}
+EXPORT_SYMBOL_GPL(inv_mpu_configure_ring);
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu/inv_mpu_spi.c
new file mode 100644
index 000000000000..fb916788a9df
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_spi.c
@@ -0,0 +1,410 @@
+/*
+* Copyright (C) 2012-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+
+#include "inv_mpu_iio.h"
+#include "inv_mpu_dts.h"
+
+#define INV_SPI_READ 0x80
+
+static int inv_spi_single_write(struct inv_mpu_state *st, u8 reg, u8 data)
+{
+ struct spi_message msg;
+ int res;
+ u8 d[2];
+ struct spi_transfer xfers = {
+ .tx_buf = d,
+ .bits_per_word = 8,
+ .len = 2,
+ };
+
+ pr_debug("reg_write: reg=0x%x data=0x%x\n", reg, data);
+ d[0] = reg;
+ d[1] = data;
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers, &msg);
+ res = spi_sync(to_spi_device(st->dev), &msg);
+
+ return res;
+}
+
+static int inv_spi_read(struct inv_mpu_state *st, u8 reg, int len, u8 *data)
+{
+ struct spi_message msg;
+ int res;
+ u8 d[1];
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = d,
+ .bits_per_word = 8,
+ .len = 1,
+ },
+ {
+ .rx_buf = data,
+ .bits_per_word = 8,
+ .len = len,
+ }
+ };
+
+ if (!data)
+ return -EINVAL;
+
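+ /* first transfer sends the register address with the read bit set,
+ * the second clocks in len bytes of data */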
+ d[0] = (reg | INV_SPI_READ);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ res = spi_sync(to_spi_device(st->dev), &msg);
+
+ if (len == 1)
+ pr_debug("reg_read: reg=0x%x length=%d data=0x%x\n",
+ reg, len, data[0]);
+ else
+ pr_debug("reg_read: reg=0x%x length=%d d0=0x%x d1=0x%x\n",
+ reg, len, data[0], data[1]);
+
+ return res;
+}
+
+static int inv_spi_mem_write(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+ u32 len, u8 const *data)
+{
+ struct spi_message msg;
+ u8 buf[258];
+ int res;
+
+ struct spi_transfer xfers = {
+ .tx_buf = buf,
+ .bits_per_word = 8,
+ .len = len + 1,
+ };
+
+ if (!data || !st)
+ return -EINVAL;
+
+ if (len > (sizeof(buf) - 1))
+ return -ENOMEM;
+
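+ /* select the DMP memory bank and start address, then burst-write the
+ * payload through REG_MEM_R_W in a single SPI transfer */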
+ res = inv_plat_single_write(st, REG_MEM_BANK_SEL, mem_addr >> 8);
+ if (res)
+ return res;
+ res = inv_plat_single_write(st, REG_MEM_START_ADDR, mem_addr & 0xFF);
+ if (res)
+ return res;
+
+ buf[0] = REG_MEM_R_W;
+ memcpy(buf + 1, data, len);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers, &msg);
+ res = spi_sync(to_spi_device(st->dev), &msg);
+
+ return res;
+}
+
+static int inv_spi_mem_read(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+ u32 len, u8 *data)
+{
+ int res;
+
+ if (!data || !st)
+ return -EINVAL;
+
+ if (len > 256)
+ return -EINVAL;
+
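+ /* select the DMP memory bank and start address, then burst-read len
+ * bytes through REG_MEM_R_W */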
+ res = inv_plat_single_write(st, REG_MEM_BANK_SEL, mem_addr >> 8);
+ if (res)
+ return res;
+ res = inv_plat_single_write(st, REG_MEM_START_ADDR, mem_addr & 0xFF);
+ if (res)
+ return res;
+ res = inv_plat_read(st, REG_MEM_R_W, len, data);
+
+ return res;
+}
+
+/*
+ * inv_mpu_probe() - probe function.
+ */
+static int inv_mpu_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct inv_mpu_state *st;
+ struct iio_dev *indio_dev;
+ int result;
+
+#ifdef KERNEL_VERSION_4_X
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL) {
+ pr_err("memory allocation failed\n");
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+#else
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL) {
+ pr_err("memory allocation failed\n");
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+#endif
+ st = iio_priv(indio_dev);
+ st->write = inv_spi_single_write;
+ st->read = inv_spi_read;
+ st->mem_write = inv_spi_mem_write;
+ st->mem_read = inv_spi_mem_read;
+ st->dev = &spi->dev;
+ st->irq = spi->irq;
+#if !defined(CONFIG_INV_MPU_IIO_ICM20602) \
+ && !defined(CONFIG_INV_MPU_IIO_IAM20680)
+ st->i2c_dis = BIT_I2C_IF_DIS;
+#endif
+ st->bus_type = BUS_SPI;
+ spi_set_drvdata(spi, indio_dev);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = id->name;
+
+#ifdef CONFIG_OF
+ result = invensense_mpu_parse_dt(st->dev, &st->plat_data);
+ if (result)
+# ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+# else
+ goto out_free;
+# endif
+ /* Power on device */
+ if (st->plat_data.power_on) {
+ result = st->plat_data.power_on(&st->plat_data);
+ if (result < 0) {
+ dev_err(st->dev, "power_on failed: %d\n", result);
+# ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+# else
+ goto out_free;
+# endif
+ }
+ pr_info("%s: power on here.\n", __func__);
+ }
+ pr_info("%s: power on.\n", __func__);
+
+ msleep(100);
+#else
+ if (dev_get_platdata(st->dev) == NULL)
+# ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+# else
+ goto out_free;
+# endif
+ st->plat_data = *(struct mpu_platform_data *)dev_get_platdata(st->dev);
+#endif
+
+ /* power is turned on inside check chip type */
+ result = inv_check_chip_type(indio_dev, id->name);
+ if (result)
+#ifdef KERNEL_VERSION_4_X
+ return -ENODEV;
+#else
+ goto out_free;
+#endif
+
+ result = inv_mpu_configure_ring(indio_dev);
+ if (result) {
+ pr_err("configure ring buffer fail\n");
+ goto out_free;
+ }
+#ifdef KERNEL_VERSION_4_X
+ result = devm_iio_device_register(st->dev, indio_dev);
+ if (result) {
+ pr_err("IIO device register fail\n");
+ goto out_unreg_ring;
+ }
+#else
+ result = iio_buffer_register(indio_dev, indio_dev->channels,
+ indio_dev->num_channels);
+ if (result) {
+ pr_err("ring buffer register fail\n");
+ goto out_unreg_ring;
+ }
+
+ result = iio_device_register(indio_dev);
+ if (result) {
+ pr_err("IIO device register fail\n");
+ goto out_remove_ring;
+ }
+#endif
+
+ result = inv_create_dmp_sysfs(indio_dev);
+ if (result) {
+ pr_err("create dmp sysfs failed\n");
+ goto out_unreg_iio;
+ }
+ init_waitqueue_head(&st->wait_queue);
+ st->resume_state = true;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&st->wake_lock, WAKE_LOCK_SUSPEND, "inv_mpu");
+#else
+ wakeup_source_init(&st->wake_lock, "inv_mpu");
+#endif
+ dev_info(st->dev, "%s ma-kernel-%s is ready to go!\n",
+ indio_dev->name, INVENSENSE_DRIVER_VERSION);
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+ pr_info("Data read from registers\n");
+#else
+ pr_info("Data read from FIFO\n");
+#endif
+#ifdef TIMER_BASED_BATCHING
+ pr_info("Timer based batching\n");
+#endif
+
+ return 0;
+#ifdef KERNEL_VERSION_4_X
+out_unreg_iio:
+ devm_iio_device_unregister(st->dev, indio_dev);
+out_unreg_ring:
+ inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+ devm_iio_device_free(st->dev, indio_dev);
+out_no_free:
+#else
+out_unreg_iio:
+ iio_device_unregister(indio_dev);
+out_remove_ring:
+ iio_buffer_unregister(indio_dev);
+out_unreg_ring:
+ inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+ iio_device_free(indio_dev);
+out_no_free:
+#endif
+ dev_err(st->dev, "%s failed %d\n", __func__, result);
+
+ return -EIO;
+}
+
+static void inv_mpu_shutdown(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+ int result;
+
+ mutex_lock(&indio_dev->mlock);
+ inv_switch_power_in_lp(st, true);
+ dev_dbg(st->dev, "Shutting down %s...\n", st->hw->name);
+
+ /* reset to make sure previous state are not there */
+ result = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_H_RESET);
+ if (result)
+ dev_err(st->dev, "Failed to reset %s\n",
+ st->hw->name);
+ msleep(POWER_UP_TIME);
+ /* turn off power to ensure gyro engine is off */
+ result = inv_set_power(st, false);
+ if (result)
+ dev_err(st->dev, "Failed to turn off %s\n",
+ st->hw->name);
+ inv_switch_power_in_lp(st, false);
+ mutex_unlock(&indio_dev->mlock);
+}
+
+/*
+ * inv_mpu_remove() - remove function.
+ */
+static int inv_mpu_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct inv_mpu_state *st = iio_priv(indio_dev);
+
+#ifdef KERNEL_VERSION_4_X
+ devm_iio_device_unregister(st->dev, indio_dev);
+#else
+ iio_device_unregister(indio_dev);
+ iio_buffer_unregister(indio_dev);
+#endif
+ inv_mpu_unconfigure_ring(indio_dev);
+#ifdef KERNEL_VERSION_4_X
+ devm_iio_device_free(st->dev, indio_dev);
+#else
+ iio_device_free(indio_dev);
+#endif
+ dev_info(st->dev, "inv-mpu-iio module removed.\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int inv_mpu_spi_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+
+ return inv_mpu_suspend(indio_dev);
+}
+
+static void inv_mpu_spi_complete(struct device *dev)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+
+ inv_mpu_complete(indio_dev);
+}
+#endif
+
+static const struct dev_pm_ops inv_mpu_spi_pmops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = inv_mpu_spi_suspend,
+ .complete = inv_mpu_spi_complete,
+#endif
+};
+
+/* device id table is used to identify what device can be
+ * supported by this driver
+ */
+static const struct spi_device_id inv_mpu_id[] = {
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+ {"icm20645", ICM20645},
+ {"icm10340", ICM10340},
+ {"icm20648", ICM20648},
+#else
+ {"icm20608d", ICM20608D},
+ {"icm20690", ICM20690},
+ {"icm20602", ICM20602},
+ {"iam20680", IAM20680},
+#endif
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, inv_mpu_id);
+
+static struct spi_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .shutdown = inv_mpu_shutdown,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "inv-mpu-iio-spi",
+ .pm = &inv_mpu_spi_pmops,
+ },
+};
+module_spi_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense SPI device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_timestamp.c b/drivers/iio/imu/inv_mpu/inv_mpu_timestamp.c
new file mode 100644
index 000000000000..2cc721b18596
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_timestamp.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2012-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/math64.h>
+
+#include "inv_mpu_iio.h"
+
+#define INV_TIME_CALIB_THRESHOLD_1 2
+
+#define MIN_DELAY (3 * NSEC_PER_MSEC)
+#define JITTER_THRESH (1 * NSEC_PER_MSEC)
+
+int inv_update_dmp_ts(struct inv_mpu_state *st, int ind)
+{
+ int i;
+ u32 counter;
+ u64 ts;
+ enum INV_ENGINE en_ind;
+ struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+ u32 base_time;
+ u64 cal_period;
+
+ if (st->mode_1k_on)
+ cal_period = (NSEC_PER_SEC >> 2);
+ else
+ cal_period = 2 * NSEC_PER_SEC;
+
+ ts = ts_algo->last_run_time - st->sensor[ind].time_calib;
+ counter = st->sensor[ind].sample_calib;
+ en_ind = st->sensor[ind].engine_base;
+ if (en_ind != ts_algo->clock_base)
+ return 0;
+ /* we average over 2 seconds period to do the timestamp calculation */
+ if (ts < cal_period)
+ return 0;
+ /* this is the first time we do timestamp averaging, return */
+ /* after resume from suspend, the clock of linux has up to 1 seconds
+ drift. We should start from the resume clock instead of using clock
+ before resume */
+ if ((!st->sensor[ind].calib_flag) || ts_algo->resume_flag) {
+ st->sensor[ind].sample_calib = 0;
+ st->sensor[ind].time_calib = ts_algo->last_run_time;
+ st->sensor[ind].calib_flag = 1;
+ ts_algo->resume_flag = false;
+
+ return 0;
+ }
+ /* if the sample number in current FIFO is not zero and between now and
+ last update time is more than 2 seconds, we do calculation */
+ if ((counter > 0) &&
+ (ts_algo->last_run_time - st->eng_info[en_ind].last_update_time >
+ cal_period)) {
+ /* duration for each sensor */
+ st->sensor[ind].dur = (u32) div_u64(ts, counter);
+ /* engine duration derived from each sensor */
+ if (st->sensor[ind].div)
+ st->eng_info[en_ind].dur = st->sensor[ind].dur /
+ st->sensor[ind].div;
+ else
+ pr_err("sensor %d divider zero!\n", ind);
+ /* update base time for each sensor */
+ if (st->eng_info[en_ind].divider) {
+ base_time = (st->eng_info[en_ind].dur /
+ st->eng_info[en_ind].divider) *
+ st->eng_info[en_ind].orig_rate;
+ if (st->mode_1k_on)
+ st->eng_info[en_ind].base_time_1k = base_time;
+ else
+ st->eng_info[en_ind].base_time = base_time;
+ } else {
+ pr_err("engine %d divider zero!\n", en_ind);
+ }
+
+ st->eng_info[en_ind].last_update_time = ts_algo->last_run_time;
+ /* update all the sensors duration based on the same engine */
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on &&
+ (st->sensor[i].engine_base == en_ind))
+ st->sensor[i].dur = st->sensor[i].div *
+ st->eng_info[en_ind].dur;
+ }
+
+ }
+ st->sensor[ind].sample_calib = 0;
+ st->sensor[ind].time_calib = ts_algo->last_run_time;
+
+ return 0;
+}
+/**
+ * inv_get_last_run_time_non_dmp_record_mode() - get the last run time in
+ * non-DMP record mode.
+ * This function updates last_run_time, which is an important parameter in
+ * the overall timestamp algorithm.
+ * Return: the FIFO count value.
+ */
+int inv_get_last_run_time_non_dmp_record_mode(struct inv_mpu_state *st)
+{
+ long long t_pre, t_post, dur;
+ int fifo_count;
+#ifndef SENSOR_DATA_FROM_REGISTERS
+ int res;
+ u8 data[2];
+#endif
+
+ t_pre = get_time_ns();
+#ifndef SENSOR_DATA_FROM_REGISTERS
+ res = inv_plat_read(st, REG_FIFO_COUNT_H, FIFO_COUNT_BYTE, data);
+ if (res) {
+ pr_info("read REG_FIFO_COUNT_H failed= %d\n", res);
+ return 0;
+ }
+#endif
+ t_post = get_time_ns();
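+ /* fifo_count samples arrived since the last run; last_run_time is
+ * advanced accordingly below and bounded to the [t_pre, t_post]
+ * register read window */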
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+ if (st->fifo_count_mode == BYTE_MODE)
+ fifo_count = st->batch.pk_size;
+ else
+ fifo_count = 1;
+#else
+ fifo_count = be16_to_cpup((__be16 *) (data));
+#endif
+ pr_debug("fifo_count=%d\n", fifo_count);
+ if (!fifo_count)
+ return 0;
+ if (st->special_mag_mode && (fifo_count == 2)) {
+ pr_debug("special trigger\n");
+ fifo_count = 1;
+ }
+
+ /* In non DMP mode, either gyro or accel duration is the duration
+ for each sample */
+ if (st->chip_config.gyro_enable)
+ dur = st->eng_info[ENGINE_GYRO].dur;
+ else
+ dur = st->eng_info[ENGINE_ACCEL].dur;
+
+ if (st->fifo_count_mode == BYTE_MODE)
+ fifo_count /= st->batch.pk_size;
+
+ /* In record mode, each number in fifo_count is 1 record or 1 sample */
+ st->ts_algo.last_run_time += dur * fifo_count;
+ if (st->ts_algo.last_run_time < t_pre)
+ st->ts_algo.last_run_time = t_pre;
+ if (st->ts_algo.last_run_time > t_post)
+ st->ts_algo.last_run_time = t_post;
+
+ return fifo_count;
+}
+
+int inv_get_dmp_ts(struct inv_mpu_state *st, int i)
+{
+ u64 current_time;
+ int expected_lower_duration, expected_upper_duration;
+
+ current_time = get_time_ns();
+
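+ /* advance the timestamp by one nominal sample period plus any pending
+ * adjustment, then clamp it for monotonicity, allowed jitter and
+ * current time */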
+ st->sensor[i].ts += st->sensor[i].dur + st->sensor[i].ts_adj;
+
+ if (st->sensor[i].ts < st->sensor[i].previous_ts)
+ st->sensor[i].ts = st->sensor[i].previous_ts + st->sensor[i].dur;
+
+ /* HiFi sensors limit timestamp jitter to +/- 2% of the nominal period */
+ expected_upper_duration = st->eng_info[st->sensor[i].engine_base].divider * 1020000;
+ expected_lower_duration = st->eng_info[st->sensor[i].engine_base].divider * 980000;
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) || defined(CONFIG_INV_MPU_IIO_ICM20690) || defined(CONFIG_INV_MPU_IIO_IAM20680)
+ if (st->sensor[i].ts < st->sensor[i].previous_ts + expected_lower_duration)
+ st->sensor[i].ts = st->sensor[i].previous_ts + expected_lower_duration;
+ if (st->sensor[i].ts > st->sensor[i].previous_ts + expected_upper_duration)
+ st->sensor[i].ts = st->sensor[i].previous_ts + expected_upper_duration;
+#endif
+ if (st->sensor[i].ts > current_time)
+ st->sensor[i].ts = current_time;
+
+ st->sensor[i].previous_ts = st->sensor[i].ts;
+
+ pr_debug("ts=%lld, reset=%lld\n", st->sensor[i].ts, st->ts_algo.reset_ts);
+ if (st->sensor[i].ts < st->ts_algo.reset_ts) {
+ pr_debug("less than reset\n");
+ st->sensor[i].send = false;
+ } else {
+ st->sensor[i].send = true;
+ }
+
+ if (st->header_count == 1)
+ inv_update_dmp_ts(st, i);
+
+ return 0;
+}
+
+static void process_sensor_bounding(struct inv_mpu_state *st, int i)
+{
+ s64 elaps_time, thresh1, thresh2;
+ struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+ u32 dur;
+
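+ /* thresh1: latest value the running timestamp may have so the count
+ * pending samples (each of duration dur) end by last_run_time;
+ * thresh2: thresh1 minus one sample period (at least MIN_DELAY), the
+ * lowest acceptable value */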
+ elaps_time = ((u64) (st->sensor[i].dur)) * st->sensor[i].count;
+ thresh1 = ts_algo->last_run_time - elaps_time;
+
+ dur = max(st->sensor[i].dur, (int)MIN_DELAY);
+ thresh2 = thresh1 - dur;
+ if (thresh1 < 0)
+ thresh1 = 0;
+ if (thresh2 < 0)
+ thresh2 = 0;
+ st->sensor[i].ts_adj = 0;
+ if ((ts_algo->calib_counter >= INV_TIME_CALIB_THRESHOLD_1) &&
+ (!ts_algo->resume_flag)) {
+ if (st->sensor[i].ts < thresh2)
+ st->sensor[i].ts_adj = thresh2 - st->sensor[i].ts;
+ } else if ((ts_algo->calib_counter >=
+ INV_TIME_CALIB_THRESHOLD_1) && ts_algo->resume_flag) {
+ if (st->sensor[i].ts < thresh2)
+ st->sensor[i].ts = ts_algo->last_run_time -
+ elaps_time - JITTER_THRESH;
+ } else {
+ st->sensor[i].ts = ts_algo->last_run_time - elaps_time -
+ JITTER_THRESH;
+ st->sensor[i].previous_ts = st->sensor[i].ts;
+ }
+
+ if (st->sensor[i].ts > thresh1)
+ st->sensor[i].ts_adj = thresh1 - st->sensor[i].ts;
+ pr_debug("cali=%d\n", st->ts_algo.calib_counter);
+ pr_debug("adj= %lld\n", st->sensor[i].ts_adj);
+ pr_debug("dur= %d count= %d last= %lld\n", st->sensor[i].dur,
+ st->sensor[i].count, ts_algo->last_run_time);
+ if (st->sensor[i].ts_adj && (st->sensor[i].count > 1))
+ st->sensor[i].ts_adj = div_s64(st->sensor[i].ts_adj,
+ st->sensor[i].count);
+}
+/*
+ * inv_bound_timestamp() - give a generic bound to each sensor timestamp.
+ * A timestamp cannot exceed the current time, and it cannot lag behind by
+ * more than one sample period either, otherwise another sample would have
+ * to fit in between. Using this principle, we can bound the sensor samples.
+ */
+int inv_bound_timestamp(struct inv_mpu_state *st)
+{
+ int i;
+ struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+
+ for (i = 0; i < SENSOR_NUM_MAX; i++) {
+ if (st->sensor[i].on) {
+ if (st->sensor[i].count) {
+ process_sensor_bounding(st, i);
+ } else if (ts_algo->calib_counter <
+ INV_TIME_CALIB_THRESHOLD_1) {
+ st->sensor[i].ts = ts_algo->reset_ts;
+ st->sensor[i].previous_ts = st->sensor[i].ts;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/iio/imu/inv_mpu/inv_test/Kconfig b/drivers/iio/imu/inv_mpu/inv_test/Kconfig
new file mode 100644
index 000000000000..a4dfd95db886
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/Kconfig
@@ -0,0 +1,13 @@
+#
+# Kconfig for Invensense IIO testing hooks
+#
+
+config INV_TESTING
+ bool "Invensense IIO testing hooks"
+ depends on INV_MPU_IIO || INV_AMI306_IIO || INV_YAS530 || INV_HUB_IIO
+ default n
+ help
+ This flag enables the display of additional testing information from
+ the Invensense IIO drivers.
+ It also enables the I2C counters facility to perform IO profiling.
+ Some additional sysfs entries will appear when this flag is enabled.
diff --git a/drivers/iio/imu/inv_mpu/inv_test/Makefile b/drivers/iio/imu/inv_mpu/inv_test/Makefile
new file mode 100644
index 000000000000..4f0edd3de901
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Invensense IIO testing hooks.
+#
+
+obj-$(CONFIG_INV_TESTING) += inv_counters.o
+
diff --git a/drivers/iio/imu/inv_mpu/inv_test/inv_counters.c b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.c
new file mode 100644
index 000000000000..f60337caeeed
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/kdev_t.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+
+#include "inv_counters.h"
+
+static int mpu_irq;
+static int accel_irq;
+static int compass_irq;
+
+struct inv_counters {
+ uint32_t i2c_tempreads;
+ uint32_t i2c_mpureads;
+ uint32_t i2c_mpuwrites;
+ uint32_t i2c_accelreads;
+ uint32_t i2c_accelwrites;
+ uint32_t i2c_compassreads;
+ uint32_t i2c_compasswrites;
+ uint32_t i2c_compassirq;
+ uint32_t i2c_accelirq;
+};
+
+static struct inv_counters Counters;
+
+static ssize_t i2c_counters_show(struct class *cls,
+ struct class_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE,
+ "%lu.%03lu %u %u %u %u %u %u %u %u %u %u\n",
+ jiffies / HZ, ((jiffies % HZ) * (1024 / HZ)),
+ mpu_irq ? kstat_irqs(mpu_irq) : 0,
+ Counters.i2c_tempreads,
+ Counters.i2c_mpureads, Counters.i2c_mpuwrites,
+ accel_irq ? kstat_irqs(accel_irq) : Counters.i2c_accelirq,
+ Counters.i2c_accelreads, Counters.i2c_accelwrites,
+ compass_irq ? kstat_irqs(compass_irq) : Counters.i2c_compassirq,
+ Counters.i2c_compassreads, Counters.i2c_compasswrites);
+}
+
+void inv_iio_counters_set_i2cirq(enum irqtype type, int irq)
+{
+ switch (type) {
+ case IRQ_MPU:
+ mpu_irq = irq;
+ break;
+ case IRQ_ACCEL:
+ accel_irq = irq;
+ break;
+ case IRQ_COMPASS:
+ compass_irq = irq;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_set_i2cirq);
+
+void inv_iio_counters_tempread(int count)
+{
+ Counters.i2c_tempreads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_tempread);
+
+void inv_iio_counters_mpuread(int count)
+{
+ Counters.i2c_mpureads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_mpuread);
+
+void inv_iio_counters_mpuwrite(int count)
+{
+ Counters.i2c_mpuwrites += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_mpuwrite);
+
+void inv_iio_counters_accelread(int count)
+{
+ Counters.i2c_accelreads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_accelread);
+
+void inv_iio_counters_accelwrite(int count)
+{
+ Counters.i2c_accelwrites += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_accelwrite);
+
+void inv_iio_counters_compassread(int count)
+{
+ Counters.i2c_compassreads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_compassread);
+
+void inv_iio_counters_compasswrite(int count)
+{
+ Counters.i2c_compasswrites += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_compasswrite);
+
+void inv_iio_counters_compassirq(void)
+{
+ Counters.i2c_compassirq++;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_compassirq);
+
+void inv_iio_counters_accelirq(void)
+{
+ Counters.i2c_accelirq++;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_accelirq);
+
+static struct class_attribute inv_class_attr[] = {
+ __ATTR(i2c_counter, S_IRUGO, i2c_counters_show, NULL),
+ __ATTR_NULL
+};
+
+static struct class inv_counters_class = {
+ .name = "inv_counters",
+ .owner = THIS_MODULE,
+ .class_attrs = inv_class_attr,
+};
+
+static int __init inv_counters_init(void)
+{
+ memset(&Counters, 0, sizeof(Counters));
+
+ return class_register(&inv_counters_class);
+}
+
+static void __exit inv_counters_exit(void)
+{
+ class_unregister(&inv_counters_class);
+}
+
+module_init(inv_counters_init);
+module_exit(inv_counters_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("GESL");
+MODULE_DESCRIPTION("inv_counters debug support");
+
diff --git a/drivers/iio/imu/inv_mpu/inv_test/inv_counters.h b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.h
new file mode 100644
index 000000000000..62f76279e703
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _INV_COUNTERS_H_
+#define _INV_COUNTERS_H_
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_INV_TESTING
+
+enum irqtype {
+ IRQ_MPU,
+ IRQ_ACCEL,
+ IRQ_COMPASS
+};
+
+#define INV_I2C_INC_MPUREAD(x) inv_iio_counters_mpuread(x)
+#define INV_I2C_INC_MPUWRITE(x) inv_iio_counters_mpuwrite(x)
+#define INV_I2C_INC_ACCELREAD(x) inv_iio_counters_accelread(x)
+#define INV_I2C_INC_ACCELWRITE(x) inv_iio_counters_accelwrite(x)
+#define INV_I2C_INC_COMPASSREAD(x) inv_iio_counters_compassread(x)
+#define INV_I2C_INC_COMPASSWRITE(x) inv_iio_counters_compasswrite(x)
+
+#define INV_I2C_INC_TEMPREAD(x) inv_iio_counters_tempread(x)
+
+#define INV_I2C_SETIRQ(type, irq) inv_iio_counters_set_i2cirq(type, irq)
+#define INV_I2C_INC_COMPASSIRQ() inv_iio_counters_compassirq()
+#define INV_I2C_INC_ACCELIRQ() inv_iio_counters_accelirq()
+
+void inv_iio_counters_mpuread(int count);
+void inv_iio_counters_mpuwrite(int count);
+void inv_iio_counters_accelread(int count);
+void inv_iio_counters_accelwrite(int count);
+void inv_iio_counters_compassread(int count);
+void inv_iio_counters_compasswrite(int count);
+
+void inv_iio_counters_tempread(int count);
+
+void inv_iio_counters_set_i2cirq(enum irqtype type, int irq);
+void inv_iio_counters_compassirq(void);
+void inv_iio_counters_accelirq(void);
+
+#else
+
+#define INV_I2C_INC_MPUREAD(x)
+#define INV_I2C_INC_MPUWRITE(x)
+#define INV_I2C_INC_ACCELREAD(x)
+#define INV_I2C_INC_ACCELWRITE(x)
+#define INV_I2C_INC_COMPASSREAD(x)
+#define INV_I2C_INC_COMPASSWRITE(x)
+
+#define INV_I2C_INC_TEMPREAD(x)
+
+#define INV_I2C_SETIRQ(type, irq)
+#define INV_I2C_INC_COMPASSIRQ()
+#define INV_I2C_INC_ACCELIRQ()
+
+#endif /* CONFIG_INV_TESTING */
+
+#endif /* _INV_COUNTERS_H_ */
+
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 32bb036069eb..961afb5588be 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -174,7 +174,7 @@ unsigned int iio_buffer_poll(struct file *filp,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
- if (!indio_dev->info)
+ if (!indio_dev->info || !rb)
return 0;
poll_wait(filp, &rb->pollq, wait);
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 6325e7dc8e03..f3cb4dc05391 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi)
}
static const struct spi_device_id st_magn_id_table[] = {
- { LSM303DLHC_MAGN_DEV_NAME },
- { LSM303DLM_MAGN_DEV_NAME },
{ LIS3MDL_MAGN_DEV_NAME },
{ LSM303AGR_MAGN_DEV_NAME },
{},
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5056bd68573f..ba282ff3892d 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -436,6 +436,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
int st_press_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata =
+ (struct st_sensors_platform_data *)press_data->dev->platform_data;
int irq = press_data->get_irq_data_ready(indio_dev);
int err;
@@ -464,12 +466,10 @@ int st_press_common_probe(struct iio_dev *indio_dev)
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
/* Some devices don't support a data ready pin. */
- if (!press_data->dev->platform_data &&
- press_data->sensor_settings->drdy_irq.addr)
- press_data->dev->platform_data =
- (struct st_sensors_platform_data *)&default_press_pdata;
+ if (!pdata && press_data->sensor_settings->drdy_irq.addr)
+ pdata = (struct st_sensors_platform_data *)&default_press_pdata;
- err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
return err;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 6a8024d9d742..864a7c8d82d3 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -86,6 +86,22 @@ int rdma_addr_size(struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_addr_size);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr)
+{
+ int ret = rdma_addr_size((struct sockaddr *) addr);
+
+ return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_in6);
+
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
+{
+ int ret = rdma_addr_size((struct sockaddr *) addr);
+
+ return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_kss);
+
static struct rdma_addr_client self;
void rdma_addr_register_client(struct rdma_addr_client *client)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e354358db77b..d57a78ec7425 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -626,6 +626,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
id_priv->id.route.addr.dev_addr.net = get_net(net);
+ id_priv->seq_num &= 0x00ffffff;
return &id_priv->id;
}
@@ -3742,6 +3743,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
struct cma_multicast *mc;
int ret;
+ if (!id->device)
+ return -EINVAL;
+
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
!cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
@@ -4006,7 +4010,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
goto out;
if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
+ rdma_addr_size(cma_dst_addr(id_priv)),
cma_dst_addr(id_priv),
RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
goto out;
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index fb43a242847b..8d7d110d0721 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -663,6 +663,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 886f61ea6cc7..2b9c00faca7d 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -131,7 +131,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
ctx = idr_find(&ctx_idr, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
- else if (ctx->file != file)
+ else if (ctx->file != file || !ctx->cm_id)
ctx = ERR_PTR(-EINVAL);
return ctx;
}
@@ -453,6 +453,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
struct rdma_ucm_create_id cmd;
struct rdma_ucm_create_id_resp resp;
struct ucma_context *ctx;
+ struct rdma_cm_id *cm_id;
enum ib_qp_type qp_type;
int ret;
@@ -473,10 +474,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
return -ENOMEM;
ctx->uid = cmd.uid;
- ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
- ucma_event_handler, ctx, cmd.ps, qp_type);
- if (IS_ERR(ctx->cm_id)) {
- ret = PTR_ERR(ctx->cm_id);
+ cm_id = rdma_create_id(current->nsproxy->net_ns,
+ ucma_event_handler, ctx, cmd.ps, qp_type);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
goto err1;
}
@@ -486,14 +487,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
ret = -EFAULT;
goto err2;
}
+
+ ctx->cm_id = cm_id;
return 0;
err2:
- rdma_destroy_id(ctx->cm_id);
+ rdma_destroy_id(cm_id);
err1:
mutex_lock(&mut);
idr_remove(&ctx_idr, ctx->id);
mutex_unlock(&mut);
+ mutex_lock(&file->mut);
+ list_del(&ctx->list);
+ mutex_unlock(&file->mut);
kfree(ctx);
return ret;
}
@@ -623,6 +629,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!rdma_addr_size_in6(&cmd.addr))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -636,22 +645,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_bind cmd;
- struct sockaddr *addr;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- addr = (struct sockaddr *) &cmd.addr;
- if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
+ if (cmd.reserved || !cmd.addr_size ||
+ cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_bind_addr(ctx->cm_id, addr);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
ucma_put_ctx(ctx);
return ret;
}
@@ -667,13 +675,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
+ !rdma_addr_size_in6(&cmd.dst_addr))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
- (struct sockaddr *) &cmd.dst_addr,
- cmd.timeout_ms);
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
@@ -683,24 +694,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
int in_len, int out_len)
{
struct rdma_ucm_resolve_addr cmd;
- struct sockaddr *src, *dst;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- src = (struct sockaddr *) &cmd.src_addr;
- dst = (struct sockaddr *) &cmd.dst_addr;
- if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
- !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
+ if (cmd.reserved ||
+ (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
+ !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
@@ -1138,10 +1148,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (cmd.qp_state > IB_QPS_ERR)
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (!ctx->cm_id->device) {
+ ret = -EINVAL;
+ goto out;
+ }
+
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
@@ -1212,6 +1230,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
if (!optlen)
return -EINVAL;
+ if (!ctx->cm_id->device)
+ return -EINVAL;
+
memset(&sa_path, 0, sizeof(sa_path));
ib_sa_unpack_path(path_data->path_rec, &sa_path);
@@ -1274,6 +1295,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+ return -EINVAL;
+
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
cmd.optlen);
if (IS_ERR(optval)) {
@@ -1295,7 +1319,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_notify cmd;
struct ucma_context *ctx;
- int ret;
+ int ret = -EINVAL;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
@@ -1304,7 +1328,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -1322,7 +1348,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
return -ENOSPC;
addr = (struct sockaddr *) &cmd->addr;
- if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ if (cmd->reserved || (cmd->addr_size != rdma_addr_size(addr)))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd->id);
@@ -1381,7 +1407,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.response = cmd.response;
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
- join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+ join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
+ if (!join_cmd.addr_size)
+ return -EINVAL;
+
join_cmd.reserved = 0;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
@@ -1397,6 +1426,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!rdma_addr_size_kss(&cmd.addr))
+ return -EINVAL;
+
return ucma_process_join(file, &cmd, out_len);
}
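
The ucma changes above all follow the same pattern: before a user-supplied sockaddr is handed to the RDMA CM, its declared size is checked against the size implied by its address family. The snippet below is only a minimal sketch of that kind of family/size validation; the helper name and the set of accepted families are illustrative assumptions, not the actual rdma_addr_size_kss()/rdma_addr_size_in6() implementations in the RDMA core.

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

/*
 * Illustrative only: return the expected sockaddr length for a given
 * address family, or 0 for anything unrecognised so callers can reject
 * the request with -EINVAL.
 */
static size_t example_addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	default:
		return 0;
	}
}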
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 0ae337bec4f2..6790ebb366dd 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -354,7 +354,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b7a73f1a8beb..3eb967521917 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2436,9 +2436,13 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
+ if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
+ sizeof (struct ib_sge))
+ return NULL;
+
return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
num_sge * sizeof (struct ib_sge), GFP_KERNEL);
-};
+}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
struct ib_device *ib_dev,
@@ -2665,6 +2669,13 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
goto err;
}
+ if (user_wr->num_sge >=
+ (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
+ sizeof (struct ib_sge)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
user_wr->num_sge * sizeof (struct ib_sge),
GFP_KERNEL);
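
The num_sge bound added above guarantees that ALIGN(wr_size, sizeof(struct ib_sge)) + num_sge * sizeof(struct ib_sge) cannot wrap before it reaches kmalloc(). On kernels that provide <linux/overflow.h>, the same intent could be expressed with the checked-arithmetic helpers; the sketch below assumes that header is available and is not the form used in this patch.

#include <linux/kernel.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Sketch: allocate a WR plus its SGE array, failing instead of wrapping. */
static void *alloc_wr_sketch(size_t wr_size, u32 num_sge)
{
	size_t hdr = ALIGN(wr_size, sizeof(struct ib_sge));
	size_t sges, total;

	if (check_mul_overflow((size_t)num_sge, sizeof(struct ib_sge), &sges) ||
	    check_add_overflow(hdr, sges, &total))
		return NULL;

	return kmalloc(total, GFP_KERNEL);
}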
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 58fce1742b8d..337b1a5eb41c 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -809,10 +809,9 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
- PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p "
+ PDBG("udb %pR db_reg %p gts_reg %p "
"qpmask 0x%x cqmask 0x%x\n",
- (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ &rdev->lldi.pdev->resource[2],
rdev->lldi.db_reg, rdev->lldi.gts_reg,
rdev->qpmask, rdev->cqmask);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8763fb832b01..67c4c73343d4 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1041,7 +1041,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
/* need to protect from a race on closing the vma as part of
* mlx4_ib_vma_close().
*/
- down_read(&owning_mm->mmap_sem);
+ down_write(&owning_mm->mmap_sem);
for (i = 0; i < HW_BAR_COUNT; i++) {
vma = context->hw_bar_info[i].vma;
if (!vma)
@@ -1055,11 +1055,13 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
BUG_ON(1);
}
+ context->hw_bar_info[i].vma->vm_flags &=
+ ~(VM_SHARED | VM_MAYSHARE);
/* context going to be destroyed, should not access ops any more */
context->hw_bar_info[i].vma->vm_ops = NULL;
}
- up_read(&owning_mm->mmap_sem);
+ up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);
}
@@ -2483,9 +2485,8 @@ err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (i = 0; i < ibdev->num_ports; ++i)
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
@@ -2586,11 +2587,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
- }
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 4d1e1c632603..ce87e9cc7eff 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -424,7 +424,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
goto err_free_mr;
mr->max_pages = max_num_sg;
-
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
goto err_free_pl;
@@ -435,6 +434,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
return &mr->ibmr;
err_free_pl:
+ mr->ibmr.device = pd->device;
mlx4_free_priv_pages(mr);
err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 02c8deab1fff..4a4ab433062f 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -972,7 +972,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ /* check multiplication overflow */
+ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr,
+ (size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
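
The SIZE_MAX / cqe_size comparison above detects overflow of cqe_size * entries without ever computing the wrapping product; the note below spells out the equivalence it relies on.

/*
 * For a non-zero cqe_size, the product cqe_size * entries exceeds SIZE_MAX
 * exactly when entries > SIZE_MAX / cqe_size (integer division), which for
 * entries >= 1 is the same as SIZE_MAX / cqe_size <= entries - 1. So the
 * quotient comparison rejects every request whose buffer size would wrap.
 */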
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cfcfbb6b84d7..c5390f6f94c5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -231,7 +231,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+ return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+ if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ return -EINVAL;
qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
@@ -1348,18 +1352,18 @@ enum {
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
- if (rate == IB_RATE_PORT_CURRENT) {
+ if (rate == IB_RATE_PORT_CURRENT)
return 0;
- } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
return -EINVAL;
- } else {
- while (rate != IB_RATE_2_5_GBPS &&
- !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
- --rate;
- }
- return rate + MLX5_STAT_RATE_OFFSET;
+ while (rate != IB_RATE_PORT_CURRENT &&
+ !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+ MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+ --rate;
+
+ return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 86c303a620c1..748b63b86cbc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -834,7 +834,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+ if (!debugfs_create_file("reset_stats", 0200, dev->dir,
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index d3f0a384faad..f6b06729f4ea 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -945,6 +945,19 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
*/
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;
+
+ /*
+ * Update the broadcast address in the priv->broadcast object
+ * if it already exists; otherwise nobody else will update it.
+ */
+ if (priv->broadcast) {
+ spin_lock_irq(&priv->lock);
+ memcpy(priv->broadcast->mcmember.mgid.raw,
+ priv->dev->broadcast + 4,
+ sizeof(union ib_gid));
+ spin_unlock_irq(&priv->lock);
+ }
+
return 0;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bad76eed06b3..37b42447045d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -724,6 +724,22 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key from the LLADDR;
+ * it must remain unchanged even if the SA returns a different
+ * GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -844,8 +860,8 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -858,7 +874,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- return;
+ return NULL;
+ }
+
+ /* To avoid a race condition, make sure that the
+ * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
}
path = __path_find(dev, daddr + 4);
@@ -896,7 +920,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
- return;
+ return NULL;
}
} else {
neigh->ah = NULL;
@@ -913,7 +937,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
- return;
+ return NULL;
err_path:
ipoib_neigh_free(neigh);
@@ -923,6 +947,8 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
+
+ return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1028,8 +1054,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_TIPC):
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, phdr->hwaddr, dev);
- return NETDEV_TX_OK;
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 5580ab0b5781..21e688d55da6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -473,6 +473,9 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return -EINVAL;
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
@@ -631,8 +634,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (mcast->backoff == 1 ||
time_after_eq(jiffies, mcast->delay_until)) {
/* Found the next unjoined group */
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
if (ipoib_mcast_join(dev, mcast)) {
spin_unlock_irq(&priv->lock);
return;
@@ -652,11 +653,9 @@ out:
queue_delayed_work(priv->wq, &priv->mcast_task,
delay_until - jiffies);
}
- if (mcast) {
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (mcast)
ipoib_mcast_join(dev, mcast);
- }
+
spin_unlock_irq(&priv->lock);
}
@@ -775,7 +774,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
- if (neigh) {
+ /* Make sure that the neigh is added only
+ * once to the mcast list.
+ */
+ if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9a99cee2665a..4fd2892613dd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2581,9 +2581,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
ret = FAST_IO_FAIL;
else
ret = FAILED;
- srp_free_req(ch, req, scmnd, 0);
- scmnd->result = DID_ABORT << 16;
- scmnd->scsi_done(scmnd);
+ if (ret == SUCCESS) {
+ srp_free_req(ch, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
+ scmnd->scsi_done(scmnd);
+ }
return ret;
}
@@ -3309,12 +3311,10 @@ static ssize_t srp_create_target(struct device *dev,
num_online_nodes());
const int ch_end = ((node_idx + 1) * target->ch_count /
num_online_nodes());
- const int cv_start = (node_idx * ibdev->num_comp_vectors /
- num_online_nodes() + target->comp_vector)
- % ibdev->num_comp_vectors;
- const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
- num_online_nodes() + target->comp_vector)
- % ibdev->num_comp_vectors;
+ const int cv_start = node_idx * ibdev->num_comp_vectors /
+ num_online_nodes();
+ const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
+ num_online_nodes();
int cpu_idx = 0;
for_each_online_cpu(cpu) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index a73874508c3a..cb3a8623ff54 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2974,12 +2974,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
- || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
- atomic_inc(&ch->req_lim_delta);
- srpt_abort_cmd(ioctx);
+ if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
- }
dir = ioctx->cmd.data_direction;
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf2660116..5f04b2d94635 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
const struct input_device_id *id)
{
struct input_leds *leds;
+ struct input_led *led;
unsigned int num_leds;
unsigned int led_code;
int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
led_no = 0;
for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
- struct input_led *led = &leds->leds[led_no];
+ if (!input_led_info[led_code].name)
+ continue;
+ led = &leds->leds[led_no];
led->handle = &leds->handle;
led->code = led_code;
- if (!input_led_info[led_code].name)
- continue;
-
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
dev_name(&dev->dev),
input_led_info[led_code].name);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index ddd8148d51d7..75ff4c965573 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -525,6 +525,16 @@ config KEYBOARD_GOLDFISH_EVENTS
To compile this driver as a module, choose M here: the
module will be called goldfish-events.
+config KEYBOARD_GOLDFISH_ROTARY
+ depends on GOLDFISH
+ tristate "Rotary encoder device for Goldfish"
+ help
+ Say Y here to get an input event device for the rotary encoder
+ exposed by the Goldfish virtual device emulator.
+
+ To compile this driver as a module, choose M here: the
+ module will be called goldfish-rotary.
+
config KEYBOARD_STOWAWAY
tristate "Stowaway keyboard"
select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 1d416ddf84e4..a5d43fc8fab6 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
+obj-$(CONFIG_KEYBOARD_GOLDFISH_ROTARY) += goldfish_rotary.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
diff --git a/drivers/input/keyboard/goldfish_rotary.c b/drivers/input/keyboard/goldfish_rotary.c
new file mode 100644
index 000000000000..485727d44684
--- /dev/null
+++ b/drivers/input/keyboard/goldfish_rotary.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+
+enum {
+ REG_READ = 0x00,
+ REG_SET_PAGE = 0x00,
+ REG_LEN = 0x04,
+ REG_DATA = 0x08,
+
+ PAGE_NAME = 0x00000,
+ PAGE_EVBITS = 0x10000,
+ PAGE_ABSDATA = 0x20000 | EV_ABS,
+};
+
+struct event_dev {
+ struct input_dev *input;
+ int irq;
+ void __iomem *addr;
+ char name[0];
+};
+
+static irqreturn_t rotary_interrupt(int irq, void *dev_id)
+{
+ struct event_dev *edev = dev_id;
+ unsigned type, code, value;
+
+ type = __raw_readl(edev->addr + REG_READ);
+ code = __raw_readl(edev->addr + REG_READ);
+ value = __raw_readl(edev->addr + REG_READ);
+
+ input_event(edev->input, type, code, value);
+ return IRQ_HANDLED;
+}
+
+static void rotary_import_bits(struct event_dev *edev,
+ unsigned long bits[], unsigned type, size_t count)
+{
+ void __iomem *addr = edev->addr;
+ int i, j;
+ size_t size;
+ uint8_t val;
+
+ __raw_writel(PAGE_EVBITS | type, addr + REG_SET_PAGE);
+
+ size = __raw_readl(addr + REG_LEN) * 8;
+ if (size < count)
+ count = size;
+
+ addr += REG_DATA;
+ for (i = 0; i < count; i += 8) {
+ val = __raw_readb(addr++);
+ for (j = 0; j < 8; j++)
+ if (val & 1 << j)
+ set_bit(i + j, bits);
+ }
+}
+
+static void rotary_import_abs_params(struct event_dev *edev)
+{
+ struct input_dev *input_dev = edev->input;
+ void __iomem *addr = edev->addr;
+ u32 val[4];
+ int count;
+ int i, j;
+
+ __raw_writel(PAGE_ABSDATA, addr + REG_SET_PAGE);
+
+ count = __raw_readl(addr + REG_LEN) / sizeof(val);
+ if (count > ABS_MAX)
+ count = ABS_MAX;
+
+ for (i = 0; i < count; i++) {
+ if (!test_bit(i, input_dev->absbit))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(val); j++) {
+ int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+ val[j] = __raw_readl(edev->addr + REG_DATA + offset);
+ }
+
+ input_set_abs_params(input_dev, i,
+ val[0], val[1], val[2], val[3]);
+ }
+}
+
+static int rotary_probe(struct platform_device *pdev)
+{
+ struct input_dev *input_dev;
+ struct event_dev *edev;
+ struct resource *res;
+ unsigned keymapnamelen;
+ void __iomem *addr;
+ int irq;
+ int i;
+ int error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ addr = devm_ioremap(&pdev->dev, res->start, 4096);
+ if (!addr)
+ return -ENOMEM;
+
+ __raw_writel(PAGE_NAME, addr + REG_SET_PAGE);
+ keymapnamelen = __raw_readl(addr + REG_LEN);
+
+ edev = devm_kzalloc(&pdev->dev,
+ sizeof(struct event_dev) + keymapnamelen + 1,
+ GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ edev->input = input_dev;
+ edev->addr = addr;
+ edev->irq = irq;
+
+ for (i = 0; i < keymapnamelen; i++)
+ edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
+
+ pr_debug("rotary_probe() keymap=%s\n", edev->name);
+
+ input_dev->name = edev->name;
+ input_dev->id.bustype = BUS_HOST;
+ rotary_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX);
+ rotary_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX);
+ rotary_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX);
+
+ rotary_import_abs_params(edev);
+
+ error = devm_request_irq(&pdev->dev, edev->irq, rotary_interrupt, 0,
+ "goldfish-rotary", edev);
+ if (error)
+ return error;
+
+ error = input_register_device(input_dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id goldfish_rotary_of_match[] = {
+ { .compatible = "generic,goldfish-rotary", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_rotary_of_match);
+
+static const struct acpi_device_id goldfish_rotary_acpi_match[] = {
+ { "GFSH0008", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_rotary_acpi_match);
+
+static struct platform_driver rotary_driver = {
+ .probe = rotary_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "goldfish_rotary",
+ .of_match_table = goldfish_rotary_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_rotary_acpi_match),
+ },
+};
+
+module_platform_driver(rotary_driver);
+
+MODULE_AUTHOR("Nimrod Gileadi");
+MODULE_DESCRIPTION("Goldfish Rotary Encoder Device");
+MODULE_LICENSE("GPL v2");
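
For readers unfamiliar with the Goldfish virtual devices, the register usage in this new driver mirrors the existing goldfish_events driver; the summary below is inferred from the code above rather than from a hardware specification.

/*
 * MMIO protocol as used above:
 *  - write a page selector (PAGE_NAME, PAGE_EVBITS | <EV_* type>, or
 *    PAGE_ABSDATA) to REG_SET_PAGE; REG_LEN then reports how much data
 *    the selected page holds and REG_DATA exposes its bytes/words;
 *  - on interrupt, three back-to-back reads of REG_READ return the event
 *    type, code and value, which are forwarded via input_event().
 */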
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 7f12b6579f82..795fa353de7c 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
{
struct matrix_keypad *keypad = input_get_drvdata(dev);
+ spin_lock_irq(&keypad->lock);
keypad->stopped = true;
- mb();
+ spin_unlock_irq(&keypad->lock);
+
flush_work(&keypad->work.work);
/*
* matrix_keypad_scan() will leave IRQs enabled;
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 5a5778729e37..76bb51309a78 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -274,9 +274,18 @@ static const struct i2c_device_id qt1070_id[] = {
};
MODULE_DEVICE_TABLE(i2c, qt1070_id);
+#ifdef CONFIG_OF
+static const struct of_device_id qt1070_of_match[] = {
+ { .compatible = "qt1070", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qt1070_of_match);
+#endif
+
static struct i2c_driver qt1070_driver = {
.driver = {
.name = "qt1070",
+ .of_match_table = of_match_ptr(qt1070_of_match),
.pm = &qt1070_pm_ops,
},
.id_table = qt1070_id,
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 9002298698fc..a5e8998047fe 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -164,11 +164,18 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
int error, col, row;
u8 reg, state, code;
- /* Initial read of the key event FIFO */
- error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ do {
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_KEY_EVENT_A\n");
+ break;
+ }
+
+ /* Assume that key code 0 signifies empty FIFO */
+ if (reg <= 0)
+ break;
- /* Assume that key code 0 signifies empty FIFO */
- while (error >= 0 && reg > 0) {
state = reg & KEY_EVENT_VALUE;
code = reg & KEY_EVENT_CODE;
@@ -182,13 +189,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
input_event(input, EV_MSC, MSC_SCAN, code);
input_report_key(input, keymap[code], state);
- /* Read for next loop */
- error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
- }
-
- if (error < 0)
- dev_err(&keypad_data->client->dev,
- "unable to read REG_KEY_EVENT_A\n");
+ } while (1);
input_sync(input);
}
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e55439..251d64ca41ce 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
if (pdata) {
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 9a4b07c8cf60..ca6286a36604 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -1264,28 +1264,28 @@ static int hbtp_fb_suspend(struct hbtp_data *ts)
goto err_power_disable;
}
ts->power_suspended = true;
+ }
- if (ts->input_dev) {
- kobject_uevent_env(&ts->input_dev->dev.kobj,
- KOBJ_OFFLINE, envp);
+ if (ts->input_dev) {
+ kobject_uevent_env(&ts->input_dev->dev.kobj,
+ KOBJ_OFFLINE, envp);
- if (ts->power_sig_enabled) {
- pr_debug("%s: power_sig is enabled, wait for signal\n",
- __func__);
- mutex_unlock(&hbtp->mutex);
- rc = wait_for_completion_interruptible(
- &hbtp->power_suspend_sig);
- if (rc != 0) {
- pr_err("%s: wait for suspend is interrupted\n",
- __func__);
- }
- mutex_lock(&hbtp->mutex);
- pr_debug("%s: Wait is done for suspend\n",
- __func__);
- } else {
- pr_debug("%s: power_sig is NOT enabled",
+ if (ts->power_sig_enabled) {
+ pr_debug("%s: power_sig is enabled, wait for signal\n",
+ __func__);
+ mutex_unlock(&hbtp->mutex);
+ rc = wait_for_completion_interruptible(
+ &hbtp->power_suspend_sig);
+ if (rc != 0) {
+ pr_err("%s: wait for suspend is interrupted\n",
__func__);
}
+ mutex_lock(&hbtp->mutex);
+ pr_debug("%s: Wait is done for suspend\n",
+ __func__);
+ } else {
+ pr_debug("%s: power_sig is NOT enabled",
+ __func__);
}
}
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
index fdcc14653b64..82fefdff366a 100644
--- a/drivers/input/misc/keychord.c
+++ b/drivers/input/misc/keychord.c
@@ -276,7 +276,7 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer,
size_t resid = count;
size_t key_bytes;
- if (count < sizeof(struct input_keychord))
+ if (count < sizeof(struct input_keychord) || count > PAGE_SIZE)
return -EINVAL;
keychords = kzalloc(count, GFP_KERNEL);
if (!keychords)
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index 603fc2fadf05..12b20840fb74 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -70,7 +70,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev)
pwr->phys = "twl4030_pwrbutton/input0";
pwr->dev.parent = &pdev->dev;
- err = devm_request_threaded_irq(&pwr->dev, irq, NULL, powerbutton_irq,
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL, powerbutton_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
IRQF_ONESHOT,
"twl4030_pwrbutton", pwr);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index c9d491bc85e0..3851d5715772 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1082,6 +1082,13 @@ static int elan_probe(struct i2c_client *client,
return error;
}
+ /* Make sure there is something at this address */
+ error = i2c_smbus_read_byte(client);
+ if (error < 0) {
+ dev_dbg(&client->dev, "nothing at this address: %d\n", error);
+ return -ENXIO;
+ }
+
/* Initialize the touchpad. */
error = elan_initialize(data);
if (error)
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index a679e56c44cd..765879dcaf85 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -557,7 +557,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
long ret;
int error;
int len;
- u8 buffer[ETP_I2C_INF_LENGTH];
+ u8 buffer[ETP_I2C_REPORT_LEN];
+
+ len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
+ if (len != ETP_I2C_REPORT_LEN) {
+ error = len < 0 ? len : -EIO;
+ dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
+ error, len);
+ }
reinit_completion(completion);
enable_irq(client->irq);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 51b96e9bf793..06ea28e5d7b4 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1715,6 +1715,17 @@ int elantech_init(struct psmouse *psmouse)
etd->samples[0], etd->samples[1], etd->samples[2]);
}
+ if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) {
+ /*
+ * This module has a bug which makes absolute mode
+ * unusable, so abort and fall back to the standard
+ * PS/2 protocol.
+ */
+ psmouse_info(psmouse,
+ "absolute mode broken, forcing standard PS/2 protocol\n");
+ goto init_fail;
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b604564dec5c..30328e57fdda 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -15,6 +15,7 @@
#define MOUSEDEV_MINORS 31
#define MOUSEDEV_MIX 63
+#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
@@ -103,7 +104,7 @@ struct mousedev_client {
spinlock_t packet_lock;
int pos_x, pos_y;
- signed char ps2[6];
+ u8 ps2[6];
unsigned char ready, buffer, bufsiz;
unsigned char imexseq, impsseq;
enum mousedev_emul mode;
@@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev,
}
client->pos_x += packet->dx;
- client->pos_x = client->pos_x < 0 ?
- 0 : (client->pos_x >= xres ? xres : client->pos_x);
+ client->pos_x = clamp_val(client->pos_x, 0, xres);
+
client->pos_y += packet->dy;
- client->pos_y = client->pos_y < 0 ?
- 0 : (client->pos_y >= yres ? yres : client->pos_y);
+ client->pos_y = clamp_val(client->pos_y, 0, yres);
p->dx += packet->dx;
p->dy += packet->dy;
@@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file)
return error;
}
-static inline int mousedev_limit_delta(int delta, int limit)
-{
- return delta > limit ? limit : (delta < -limit ? -limit : delta);
-}
-
-static void mousedev_packet(struct mousedev_client *client,
- signed char *ps2_data)
+static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data)
{
struct mousedev_motion *p = &client->packets[client->tail];
+ s8 dx, dy, dz;
+
+ dx = clamp_val(p->dx, -127, 127);
+ p->dx -= dx;
+
+ dy = clamp_val(p->dy, -127, 127);
+ p->dy -= dy;
- ps2_data[0] = 0x08 |
- ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
- ps2_data[1] = mousedev_limit_delta(p->dx, 127);
- ps2_data[2] = mousedev_limit_delta(p->dy, 127);
- p->dx -= ps2_data[1];
- p->dy -= ps2_data[2];
+ ps2_data[0] = BIT(3);
+ ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2);
+ ps2_data[0] |= p->buttons & 0x07;
+ ps2_data[1] = dx;
+ ps2_data[2] = dy;
switch (client->mode) {
case MOUSEDEV_EMUL_EXPS:
- ps2_data[3] = mousedev_limit_delta(p->dz, 7);
- p->dz -= ps2_data[3];
- ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
+ dz = clamp_val(p->dz, -7, 7);
+ p->dz -= dz;
+
+ ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_IMPS:
- ps2_data[0] |=
- ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
- ps2_data[3] = mousedev_limit_delta(p->dz, 127);
- p->dz -= ps2_data[3];
+ dz = clamp_val(p->dz, -127, 127);
+ p->dz -= dz;
+
+ ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+ ((p->buttons & 0x08) >> 1);
+ ps2_data[3] = dz;
+
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_PS2:
default:
- ps2_data[0] |=
- ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
p->dz = 0;
+
+ ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+ ((p->buttons & 0x08) >> 1);
+
client->bufsiz = 3;
break;
}
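
The rewritten mousedev_packet() clamps the accumulated deltas before packing them into a standard PS/2 report; the layout of the first report byte, as built above, is summarized below.

/*
 * ps2_data[0] layout:
 *   bits 0-2  button state (left, right, middle)
 *   bit  3    always set (BIT(3))
 *   bit  4    X sign  -> (dx & BIT(7)) >> 3
 *   bit  5    Y sign  -> (dy & BIT(7)) >> 2
 *   bits 6-7  X/Y overflow, never set here because dx/dy are clamped
 *             to [-127, 127] before packing
 */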
@@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
- signed char data[sizeof(client->ps2)];
+ u8 data[sizeof(client->ps2)];
int retval = 0;
if (!client->ready && !client->buffer && mousedev->exist &&
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d1051e3ce819..e484ea2dc787 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -530,6 +530,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{ }
};
+static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
+ {
+ /*
+ * Sony Vaio VGN-CS series require MUX or the touch sensor
+ * buttons will disturb touchpad operation
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+ },
+ },
+ { }
+};
+
/*
* On some Asus laptops, just running self tests cause problems.
*/
@@ -693,6 +707,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
},
},
{
+ /* Lenovo ThinkPad L460 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+ },
+ },
+ {
/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
@@ -1223,6 +1244,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_nomux_table))
i8042_nomux = true;
+ if (dmi_check_system(i8042_dmi_forcemux_table))
+ i8042_nomux = false;
+
if (dmi_check_system(i8042_dmi_notimeout_table))
i8042_notimeout = true;
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index 71b5a634cf6d..e7bb155911d0 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -152,7 +152,7 @@ static int __maybe_unused ar1021_i2c_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ar1021_i2c_pm, ar1021_i2c_suspend, ar1021_i2c_resume);
static const struct i2c_device_id ar1021_i2c_id[] = {
- { "MICROCHIP_AR1021_I2C", 0 },
+ { "ar1021", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id);
diff --git a/drivers/input/touchscreen/atmel_maxtouch_ts.c b/drivers/input/touchscreen/atmel_maxtouch_ts.c
index f58fc8555156..423a055bbec6 100644
--- a/drivers/input/touchscreen/atmel_maxtouch_ts.c
+++ b/drivers/input/touchscreen/atmel_maxtouch_ts.c
@@ -1,7 +1,7 @@
/*
* Atmel maXTouch Touchscreen driver
*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
*
* Linux foundation chooses to take subject only to the GPLv2 license terms,
* and distributes only under these terms.
@@ -529,6 +529,8 @@ static ssize_t mxt_debug_msg_read(struct file *filp, struct kobject *kobj,
static int mxt_debug_msg_init(struct mxt_data *data)
{
+ int ret;
+
sysfs_bin_attr_init(&data->debug_msg_attr);
data->debug_msg_attr.attr.name = "debug_msg";
data->debug_msg_attr.attr.mode = 0666;
@@ -536,11 +538,20 @@ static int mxt_debug_msg_init(struct mxt_data *data)
data->debug_msg_attr.write = mxt_debug_msg_write;
data->debug_msg_attr.size = data->T5_msg_size * DEBUG_MSG_MAX;
- if (sysfs_create_bin_file(&data->client->dev.kobj,
- &data->debug_msg_attr) < 0)
- dev_info(&data->client->dev, "Debugfs already exists\n");
+ ret = sysfs_create_bin_file(&data->client->dev.kobj,
+ &data->debug_msg_attr);
+ if (ret < 0) {
+ if (ret == -EEXIST) {
+ dev_info(&data->client->dev,
+ "Debugfs already exists\n");
+ ret = 0;
+ } else {
+ dev_err(&data->client->dev,
+ "Failed to create 'debug_msg' file\n");
+ }
+ }
- return 0;
+ return ret;
}
static void mxt_debug_msg_remove(struct mxt_data *data)
@@ -1462,7 +1473,7 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
u8 value, bool wait)
{
u16 reg;
- u8 command_register;
+ u8 command_register = 0;
int timeout_counter = 0;
int ret;
@@ -1567,7 +1578,7 @@ static int mxt_check_retrigen(struct mxt_data *data)
{
struct i2c_client *client = data->client;
int error;
- int val;
+ int val = 0;
if (data->pdata->irqflags & IRQF_TRIGGER_LOW)
return 0;
@@ -1612,8 +1623,11 @@ static int mxt_update_t100_resolution(struct mxt_data *data)
struct i2c_client *client = data->client;
int error;
struct mxt_object *object;
- u16 range_x, range_y, temp;
- u8 cfg, tchaux;
+ u16 range_x = 0;
+ u16 range_y = 0;
+ u16 temp;
+ u8 cfg = 0;
+ u8 tchaux = 0;
u8 aux;
bool update = false;
@@ -1653,10 +1667,6 @@ static int mxt_update_t100_resolution(struct mxt_data *data)
if (range_x == 0)
range_x = 1023;
- /* Handle default values */
- if (range_x == 0)
- range_x = 1023;
-
if (range_y == 0)
range_y = 1023;
@@ -1728,8 +1738,8 @@ static int mxt_update_t9_resolution(struct mxt_data *data)
{
struct i2c_client *client = data->client;
int error;
- struct t9_range range;
- unsigned char orient;
+ struct t9_range range = {0};
+ unsigned char orient = 0;
struct mxt_object *object;
u16 temp;
bool update = false;
@@ -4014,6 +4024,7 @@ static int mxt_probe(struct i2c_client *client,
len = strlen(data->pdata->cfg_name);
if (len > MXT_NAME_MAX_LEN - 1) {
dev_err(&client->dev, "Invalid config name\n");
+ error = -EINVAL;
goto err_destroy_mutex;
}
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2d5794ec338b..88dfe3008cf4 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2523,6 +2523,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
.driver_data = samus_platform_data,
},
{
+ /* Samsung Chromebook Pro */
+ .ident = "Samsung Chromebook Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+ },
+ .driver_data = samus_platform_data,
+ },
+ {
/* Other Google Chromebooks */
.ident = "Chromebook",
.matches = {
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
index 93c9c3c373b8..6f6cb35d90da 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
* Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -472,7 +472,7 @@ static int synaptics_i2c_change_pipe_owner(
struct synaptics_rmi4_data *rmi4_data, enum subsystem subsystem)
{
/*scm call descriptor */
- struct scm_desc desc;
+ struct scm_desc desc = {0};
struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
int ret = 0;
@@ -3808,6 +3808,13 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
}
exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
+ if (exp_data.workqueue == NULL) {
+ dev_err(&pdev->dev,
+ "%s: Failed to create workqueue\n", __func__);
+ retval = -ENOMEM;
+ goto err_create_wq;
+ }
+
INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
exp_data.rmi4_data = rmi4_data;
exp_data.queue_work = true;
@@ -3859,10 +3866,9 @@ err_create_debugfs_file:
debugfs_remove_recursive(rmi4_data->dir);
err_create_debugfs_dir:
cancel_delayed_work_sync(&exp_data.work);
- if (exp_data.workqueue != NULL) {
- flush_workqueue(exp_data.workqueue);
- destroy_workqueue(exp_data.workqueue);
- }
+ flush_workqueue(exp_data.workqueue);
+ destroy_workqueue(exp_data.workqueue);
+err_create_wq:
synaptics_rmi4_irq_enable(rmi4_data, false);
free_irq(rmi4_data->irq, rmi4_data);
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
index 4787f2bcd768..13680130c2de 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -681,10 +681,12 @@ static enum flash_area fwu_go_nogo(struct image_header_data *header)
goto exit;
}
- while (strptr[index] >= '0' && strptr[index] <= '9') {
+ while ((index < MAX_FIRMWARE_ID_LEN - 1) && strptr[index] >= '0'
+ && strptr[index] <= '9') {
firmware_id[index] = strptr[index];
index++;
}
+ firmware_id[index] = '\0';
retval = sstrtoul(firmware_id, 10, &image_fw_id);
kfree(firmware_id);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 5d0cd51c6f41..a4b7b4c3d27b 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client,
tsc2007_stop(ts);
+ /* power down the chip (TSC2007_SETUP does not ACK on I2C) */
+ err = tsc2007_xfer(ts, PWRDOWN);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to setup chip: %d\n", err);
+ return err; /* usually, chip does not respond */
+ }
+
err = input_register_device(input_dev);
if (err) {
dev_err(&client->dev,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6317478916ef..56f2980adc28 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -2256,8 +2256,17 @@ static int arm_smmu_attach_dynamic(struct iommu_domain *domain,
smmu_domain->pgtbl_ops = pgtbl_ops;
ret = 0;
out:
- if (ret)
+ if (ret) {
free_io_pgtable_ops(pgtbl_ops);
+ /* unassign any freed page table memory */
+ if (arm_smmu_is_master_side_secure(smmu_domain)) {
+ arm_smmu_secure_domain_lock(smmu_domain);
+ arm_smmu_secure_pool_destroy(smmu_domain);
+ arm_smmu_unassign_table(smmu_domain);
+ arm_smmu_secure_domain_unlock(smmu_domain);
+ }
+ smmu_domain->pgtbl_ops = NULL;
+ }
mutex_unlock(&smmu->attach_lock);
return ret;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f929879ecae6..10068a481e22 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -127,6 +127,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
iommu->name);
dmar_free_hwirq(irq);
+ iommu->pr_irq = 0;
goto err;
}
dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
@@ -142,9 +143,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
- free_irq(iommu->pr_irq, iommu);
- dmar_free_hwirq(iommu->pr_irq);
- iommu->pr_irq = 0;
+ if (iommu->pr_irq) {
+ free_irq(iommu->pr_irq, iommu);
+ dmar_free_hwirq(iommu->pr_irq);
+ iommu->pr_irq = 0;
+ }
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
iommu->prq = NULL;
@@ -386,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
pasid_max - 1, GFP_KERNEL);
if (ret < 0) {
kfree(svm);
+ kfree(sdev);
goto out;
}
svm->pasid = ret;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef32bd6..62739766b60b 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -126,7 +126,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
break; /* found a free slot */
}
adjust_limit_pfn:
- limit_pfn = curr_iova->pfn_lo - 1;
+ limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
prev = curr;
curr = rb_prev(curr);
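
The conditional above exists because pfn_lo is an unsigned PFN; the note below walks through what happened when the lowest reserved range started at PFN 0.

/*
 * With an unsigned pfn_lo of 0, the old "limit_pfn = curr_iova->pfn_lo - 1"
 * wrapped around to the maximum PFN value, which effectively removed the
 * allocation limit for the rest of the walk. Clamping the result to 0
 * keeps the search bounded at the bottom of the address space.
 */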
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 3dc5b65f3990..b98d38f95237 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1295,6 +1295,7 @@ static int __init omap_iommu_init(void)
const unsigned long flags = SLAB_HWCACHE_ALIGN;
size_t align = 1 << 10; /* L2 pagetable alignement */
struct device_node *np;
+ int ret;
np = of_find_matching_node(NULL, omap_iommu_of_match);
if (!np)
@@ -1308,11 +1309,25 @@ static int __init omap_iommu_init(void)
return -ENOMEM;
iopte_cachep = p;
- bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
-
omap_iommu_debugfs_init();
- return platform_driver_register(&omap_iommu_driver);
+ ret = platform_driver_register(&omap_iommu_driver);
+ if (ret) {
+ pr_err("%s: failed to register driver\n", __func__);
+ goto fail_driver;
+ }
+
+ ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+ if (ret)
+ goto fail_bus;
+
+ return 0;
+
+fail_bus:
+ platform_driver_unregister(&omap_iommu_driver);
+fail_driver:
+ kmem_cache_destroy(iopte_cachep);
+ return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bb992583cd2b..57063279729f 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -227,3 +227,11 @@ config IRQ_MXS
config MSM_IRQ
bool
select IRQ_DOMAIN
+
+config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable the Goldfish interrupt controller driver
+ used on Goldfish-based virtual platforms.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index a9ac2b28f4cf..67aedf02e991 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -56,3 +56,4 @@ obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
obj-$(CONFIG_QCOM_SHOW_RESUME_IRQ) += msm_show_resume_irq.o
+obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5f1757ac61d..82e00e3ad0e0 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -663,7 +663,7 @@ static struct irq_chip its_irq_chip = {
* This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
*/
#define IRQS_PER_CHUNK_SHIFT 5
-#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
+#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
@@ -1168,11 +1168,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
- * At least one bit of EventID is being used, hence a minimum
- * of two entries. No, the architecture doesn't let you
- * express an ITT with a single entry.
+ * We allocate at least one chunk worth of LPIs per device,
+ * and thus that many ITEs. The device may require fewer, though.
*/
- nr_ites = max(2UL, roundup_pow_of_two(nvecs));
+ nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
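
Because LPIs are handed out in chunks of IRQS_PER_CHUNK (32 with the shift defined above), the ITT must cover at least one full chunk even when fewer vectors are requested. A quick worked example, with assumed numbers:

/*
 * Example (numbers are illustrative): nvecs = 3, ite_size = 8 bytes.
 *   nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(3)) = max(32, 4) = 32
 *   sz      = 32 * 8 = 256 bytes, then padded so the table can be aligned
 *             to ITS_ITT_ALIGN
 * so the ITT is sized for the whole chunk, not just the 3 requested MSIs.
 */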
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9e96d81bc5cd..9a22494a2371 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -765,7 +765,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
* Ensure that stores to Normal memory are visible to the
* other CPUs before issuing the IPI.
*/
- smp_wmb();
+ wmb();
for_each_cpu(cpu, mask) {
unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 10b73d9bea78..b0b534622734 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -41,6 +41,7 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/syscore_ops.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -69,6 +70,7 @@ union gic_base {
};
struct gic_chip_data {
+ unsigned int irq_offset;
union gic_base dist_base;
union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
@@ -85,6 +87,10 @@ struct gic_chip_data {
#ifdef CONFIG_GIC_NON_BANKED
void __iomem *(*get_base)(union gic_base *);
#endif
+#ifdef CONFIG_PM
+ unsigned int wakeup_irqs[32];
+ unsigned int enabled_irqs[32];
+#endif
};
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -222,6 +228,109 @@ static void gic_unmask_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
+#ifdef CONFIG_PM
+static int gic_suspend_one(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ for (i = 0; i * 32 < gic->gic_irqs; i++) {
+ gic->enabled_irqs[i]
+ = readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4);
+ /* disable all of them */
+ writel_relaxed(0xffffffff,
+ base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ /* enable the wakeup set */
+ writel_relaxed(gic->wakeup_irqs[i],
+ base + GIC_DIST_ENABLE_SET + i * 4);
+ }
+ /* make sure all gic setting finished */
+ mb();
+ return 0;
+}
+
+static int gic_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++)
+ gic_suspend_one(&gic_data[i]);
+ return 0;
+}
+
+static void gic_show_resume_irq(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ u32 enabled;
+ u32 pending[32];
+ void __iomem *base = gic_data_dist_base(gic);
+
+ raw_spin_lock(&irq_controller_lock);
+ for (i = 0; i * 32 < gic->gic_irqs; i++) {
+ enabled = readl_relaxed(base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ pending[i] = readl_relaxed(base + GIC_DIST_PENDING_SET + i * 4);
+ pending[i] &= enabled;
+ }
+ raw_spin_unlock(&irq_controller_lock);
+
+ for (i = find_first_bit((unsigned long *)pending, gic->gic_irqs);
+ i < gic->gic_irqs;
+ i = find_next_bit((unsigned long *)pending,
+ gic->gic_irqs, i+1)) {
+ unsigned int irq = irq_find_mapping(gic->domain,
+ i + gic->irq_offset);
+ struct irq_desc *desc = irq_to_desc(irq);
+ const char *name = "null";
+
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ pr_warn("%s: %d triggered %s\n", __func__,
+ i + gic->irq_offset, name);
+ }
+}
+
+static void gic_resume_one(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ gic_show_resume_irq(gic);
+ for (i = 0; i * 32 < gic->gic_irqs; i++) {
+ /* disable all of them */
+ writel_relaxed(0xffffffff,
+ base + GIC_DIST_ENABLE_CLEAR + i * 4);
+ /* enable the enabled set */
+ writel_relaxed(gic->enabled_irqs[i],
+ base + GIC_DIST_ENABLE_SET + i * 4);
+ }
+ /* make sure all gic setting finished */
+ mb();
+}
+
+static void gic_resume(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++)
+ gic_resume_one(&gic_data[i]);
+}
+
+static struct syscore_ops gic_syscore_ops = {
+ .suspend = gic_suspend,
+ .resume = gic_resume,
+};
+
+static int __init gic_init_sys(void)
+{
+ register_syscore_ops(&gic_syscore_ops);
+ return 0;
+}
+arch_initcall(gic_init_sys);
+#endif
+
static void gic_eoi_irq(struct irq_data *d)
{
if (gic_arch_extn.irq_eoi) {
@@ -373,6 +482,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
int ret = -ENXIO;
+ unsigned int reg_offset, bit_offset;
+ unsigned int gicirq = gic_irq(d);
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+
+ /* per-cpu interrupts cannot be wakeup interrupts */
+ WARN_ON(gicirq < 32);
+
+ reg_offset = gicirq / 32;
+ bit_offset = gicirq % 32;
+
+ if (on)
+ gic_data->wakeup_irqs[reg_offset] |= 1 << bit_offset;
+ else
+ gic_data->wakeup_irqs[reg_offset] &= ~(1 << bit_offset);
if (gic_arch_extn.irq_set_wake)
ret = gic_arch_extn.irq_set_wake(d, on);
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 000000000000..ac18926b68b7
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,136 @@
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS 32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE 8
+
+#define GFPIC_REG_IRQ_PENDING 0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL 0x08
+#define GFPIC_REG_IRQ_DISABLE 0x0c
+#define GFPIC_REG_IRQ_ENABLE 0x10
+
+struct goldfish_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+};
+
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+ struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct goldfish_pic_data *gfpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+ if (!gfpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gfpic->base = of_iomap(of_node, 0);
+ if (!gfpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ /* Mask interrupts. */
+ writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
+ gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+ handle_level_irq);
+ if (!gc) {
+ pr_err("Failed to allocate chip structures!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ct = gc->chip_types;
+ ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+ ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+ gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0,
+ &goldfish_irq_domain_ops,
+ NULL);
+ if (!gfpic->irq_domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ goldfish_pic_cascade, gfpic);
+
+ pr_info("Successfully registered.\n");
+ return 0;
+
+out_iounmap:
+ iounmap(gfpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(gfpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6f1dbd52ec91..3f79b3a203aa 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -181,7 +181,7 @@ void gic_write_cpu_compare(cycle_t cnt, int cpu)
local_irq_save(flags);
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));
if (mips_cm_is64) {
gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
@@ -230,6 +230,14 @@ void gic_stop_count(void)
#endif
+unsigned gic_read_local_vp_id(void)
+{
+ unsigned long ident;
+
+ ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
+ return ident & GIC_VP_IDENT_VCNUM_MSK;
+}
+
static bool gic_local_irq_is_routable(int intr)
{
u32 vpe_ctl;
@@ -534,7 +542,8 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -548,7 +557,8 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -665,7 +675,8 @@ static void __init gic_basic_init(void)
for (i = 0; i < gic_vpes; i++) {
unsigned int j;
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
if (!gic_local_irq_is_routable(j))
continue;
@@ -710,7 +721,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
for (i = 0; i < gic_vpes; i++) {
u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
switch (intr) {
case GIC_LOCAL_INT_WD:
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 7b4ddf0a39ec..2d28530b7e82 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *);
static void listen_check(DIVA_CAPI_ADAPTER *);
static byte AddInfo(byte **, byte **, byte *, byte *);
static byte getChannel(API_PARSE *);
-static void IndParse(PLCI *, word *, byte **, byte);
+static void IndParse(PLCI *, const word *, byte **, byte);
static byte ie_compare(byte *, byte *);
static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
@@ -4860,7 +4860,7 @@ static void sig_ind(PLCI *plci)
/* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
/* SMSG is situated at the end because its 0 (for compatibility reasons */
/* (see Info_Mask Bit 4, first IE. then the message type) */
- word parms_id[] =
+ static const word parms_id[] =
{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
@@ -4868,12 +4868,12 @@ static void sig_ind(PLCI *plci)
/* 14 FTY repl by ESC_CHI */
/* 18 PI repl by ESC_LAW */
/* removed OAD changed to 0xff for future use, OAD is multiIE now */
- word multi_fac_id[] = {1, FTY};
- word multi_pi_id[] = {1, PI};
- word multi_CiPN_id[] = {1, OAD};
- word multi_ssext_id[] = {1, ESC_SSEXT};
+ static const word multi_fac_id[] = {1, FTY};
+ static const word multi_pi_id[] = {1, PI};
+ static const word multi_CiPN_id[] = {1, OAD};
+ static const word multi_ssext_id[] = {1, ESC_SSEXT};
- word multi_vswitch_id[] = {1, ESC_VSWITCH};
+ static const word multi_vswitch_id[] = {1, ESC_VSWITCH};
byte *cau;
word ncci;
@@ -8926,7 +8926,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a)
/* functions for all parameters sent in INDs */
/*------------------------------------------------------------------*/
-static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
+static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
{
word ploc; /* points to current location within packet */
byte w;
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 358a574d9e8b..46d957c34be1 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -718,7 +718,7 @@ icn_sendbuf(int channel, int ack, struct sk_buff *skb, icn_card *card)
return 0;
if (card->sndcount[channel] > ICN_MAX_SQUEUE)
return 0;
-#warning TODO test headroom or use skb->nb to flag ACK
+ /* TODO test headroom or use skb->nb to flag ACK */
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
/* Push ACK flag as one
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b621fbc3..b92a19a594a1 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
- cskb = skb_copy(skb, GFP_KERNEL);
+ cskb = skb_copy(skb, GFP_ATOMIC);
if (!cskb) {
printk(KERN_WARNING "%s no skb\n", __func__);
break;
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 3597ef47b28a..09fc129ef2fa 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -441,6 +441,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
RspMessage rcvmsg;
ReqMessage sndmsg;
HWConfig_pl hwci;
+ void __iomem *rambase_sig = (void __iomem *)rambase + SIG_OFFSET;
int x;
pr_debug("Attempting to identify adapter @ 0x%lx io 0x%x\n",
@@ -481,7 +482,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
*/
outb(PRI_BASEPG_VAL, pgport);
msleep_interruptible(1000);
- sig = readl(rambase + SIG_OFFSET);
+ sig = readl(rambase_sig);
pr_debug("Looking for a signature, got 0x%lx\n", sig);
if (sig == SIGNATURE)
return PRI_BOARD;
@@ -491,7 +492,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
*/
outb(BRI_BASEPG_VAL, pgport);
msleep_interruptible(1000);
- sig = readl(rambase + SIG_OFFSET);
+ sig = readl(rambase_sig);
pr_debug("Looking for a signature, got 0x%lx\n", sig);
if (sig == SIGNATURE)
return BRI_BOARD;
@@ -501,7 +502,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
/*
* Try to spot a card
*/
- sig = readl(rambase + SIG_OFFSET);
+ sig = readl(rambase_sig);
pr_debug("Looking for a signature, got 0x%lx\n", sig);
if (sig != SIGNATURE)
return -1;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index e8b1120f486d..eef3e64ca0a8 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -88,21 +88,23 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
down_read(&led_cdev->trigger_lock);
if (!led_cdev->trigger)
- len += sprintf(buf+len, "[none] ");
+ len += scnprintf(buf+len, PAGE_SIZE - len, "[none] ");
else
- len += sprintf(buf+len, "none ");
+ len += scnprintf(buf+len, PAGE_SIZE - len, "none ");
list_for_each_entry(trig, &trigger_list, next_trig) {
if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
trig->name))
- len += sprintf(buf+len, "[%s] ", trig->name);
+ len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ",
+ trig->name);
else
- len += sprintf(buf+len, "%s ", trig->name);
+ len += scnprintf(buf+len, PAGE_SIZE - len, "%s ",
+ trig->name);
}
up_read(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
- len += sprintf(len+buf, "\n");
+ len += scnprintf(len+buf, PAGE_SIZE - len, "\n");
return len;
}
EXPORT_SYMBOL_GPL(led_trigger_show);
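The sprintf() to scnprintf() conversion above is the standard way to keep a sysfs show() callback inside its one-page buffer: scnprintf() returns the number of characters actually written, excluding the trailing NUL, so the running length can never exceed PAGE_SIZE. A minimal sketch of the same bounded-append idiom with illustrative names, not tied to the LED code:

#include <linux/device.h>
#include <linux/kernel.h>

/* Append several items into a sysfs buffer without overrunning PAGE_SIZE. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	static const char *const items[] = { "alpha", "beta", "gamma" };
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(items); i++)
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ", items[i]);

	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}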
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index b775e1efecd3..b9f71a87b7e1 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -281,7 +281,7 @@ static int pca955x_probe(struct i2c_client *client,
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (pdata) {
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index b4ec36fd3cdf..c90633b16fad 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -319,7 +319,7 @@ static inline int get_current_reg_code(int target_curr_ma, int ires_ua)
if (!ires_ua || !target_curr_ma || (target_curr_ma < (ires_ua / 1000)))
return 0;
- return DIV_ROUND_UP(target_curr_ma * 1000, ires_ua) - 1;
+ return DIV_ROUND_CLOSEST(target_curr_ma * 1000, ires_ua) - 1;
}
static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
@@ -391,7 +391,7 @@ led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev)
static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
{
int rc, i, addr_offset;
- u8 val = 0, mask;
+ u8 val = 0, mask, strobe_mask = 0, strobe_ctrl;
for (i = 0; i < led->num_fnodes; i++) {
addr_offset = led->fnode[i].id;
@@ -402,6 +402,51 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
return rc;
val |= 0x1 << led->fnode[i].id;
+
+ if (led->fnode[i].strobe_sel == HW_STROBE) {
+ if (led->fnode[i].id == LED3)
+ strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+ else
+ strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT;
+ }
+
+ if (led->fnode[i].id == LED3 &&
+ led->fnode[i].strobe_sel == LPG_STROBE)
+ strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
+ /*
+ * As per the hardware recommendation, to use LED2/LED3 in HW
+ * strobe mode, LED1 should be set to HW strobe mode as well.
+ */
+ if (led->fnode[i].strobe_sel == HW_STROBE &&
+ (led->fnode[i].id == LED2 || led->fnode[i].id == LED3)) {
+ mask = FLASH_HW_STROBE_MASK;
+ addr_offset = led->fnode[LED1].id;
+ /*
+ * HW_STROBE: enable, TRIGGER: level,
+ * POLARITY: active high
+ */
+ strobe_ctrl = BIT(2) | BIT(0);
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_STROBE_CTRL(
+ led->base + addr_offset),
+ mask, strobe_ctrl);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
+ strobe_mask, 0);
+ if (rc < 0)
+ return rc;
+
+ if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
+ LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
+ if (rc < 0)
+ return rc;
}
rc = qpnp_flash_led_write(led,
@@ -595,19 +640,6 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
return rc;
}
- if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
- rc = qpnp_flash_led_masked_write(led,
- FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
- LED3_FLASH_ONCE_ONLY_BIT, 0);
- if (rc < 0)
- return rc;
-
- rc = qpnp_flash_led_masked_write(led,
- FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
- LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
- if (rc < 0)
- return rc;
- }
return 0;
}
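To see why get_current_reg_code() above switches from DIV_ROUND_UP() to DIV_ROUND_CLOSEST(), consider a request of 505 mA with a hypothetical 12.5 mA (12500 uA) current resolution, assuming the hardware drives (code + 1) * resolution:

	505 * 1000 / 12500 = 40.4
	DIV_ROUND_UP:      41 - 1 = code 40  ->  (40 + 1) * 12.5 mA = 512.5 mA
	DIV_ROUND_CLOSEST: 40 - 1 = code 39  ->  (39 + 1) * 12.5 mA = 500.0 mA

Rounding to the closest step programs the current nearest the request instead of always rounding up.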
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 4d46f2ce606f..aa84fcfd59fc 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -514,15 +514,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
*
 * The idea is that if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash only volume are not reclaimable; if
+ * they are mixed into buckets with dirty sectors of a cached device, those
+ * buckets stay marked dirty and won't be reclaimed, even though the cached
+ * device's dirty data has already been written back to the backing device.
+ *
+ * And say you're starting Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -539,7 +545,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c2248b75f2da..b9a526271f02 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -890,6 +890,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
@@ -935,6 +941,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
uint32_t rtime = cpu_to_le32(get_seconds());
struct uuid_entry *u;
char buf[BDEVNAME_SIZE];
+ struct cached_dev *exist_dc, *t;
bdevname(dc->bdev, buf);
@@ -958,6 +965,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
return -EINVAL;
}
+ /* Check whether already attached */
+ list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+ if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+ pr_err("Tried to attach %s but duplicate UUID already attached",
+ buf);
+
+ return -EINVAL;
+ }
+ }
+
u = uuid_find(c, dc->sb.uuid);
if (u &&
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 969c815c90b6..d566c32e222a 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -818,7 +818,8 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOIO: don't recurse into the I/O layer
+ * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+ * mutex and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
* __GFP_NOWARN: don't print a warning in case of failure
@@ -828,7 +829,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (b)
return b;
}
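The GFP_NOIO to GFP_NOWAIT change above is an instance of a common two-stage allocation pattern: make a cheap, non-sleeping attempt while holding the client's lock, and fall back to an allocation that may sleep only after dropping the lock. A simplified sketch of that pattern, not the dm-bufio code itself; the lock and allocator here are placeholders:

#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/*
 * Stage one: a non-sleeping attempt under the lock (no retries, no
 * emergency reserves, no allocation-failure warning).  Stage two: drop
 * the lock and retry with GFP_NOIO, which may sleep but never recurses
 * into the I/O layer.
 */
static void *alloc_two_stage(struct mutex *lock, size_t size)
{
	void *p;

	p = kmalloc(size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC |
			  __GFP_NOWARN);
	if (p)
		return p;

	mutex_unlock(lock);
	p = kmalloc(size, GFP_NOIO);
	mutex_lock(lock);

	return p;
}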
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 81c5e1a1f363..1b84d2890fbf 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -300,6 +300,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
else if (rw & REQ_WRITE_SAME)
special_cmd_max_sectors = q->limits.max_write_same_sectors;
if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+ atomic_inc(&io->count);
dec_count(io, region, -EOPNOTSUPP);
return;
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 7baeeafa059d..065d7cee0d21 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1773,12 +1773,12 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
cmd == DM_LIST_VERSIONS_CMD)
return 0;
- if ((cmd == DM_DEV_CREATE_CMD)) {
+ if (cmd == DM_DEV_CREATE_CMD) {
if (!*param->name) {
DMWARN("name not supplied when creating device");
return -EINVAL;
}
- } else if ((*param->uuid && *param->name)) {
+ } else if (*param->uuid && *param->name) {
DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
return -EINVAL;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e34cf53bd068..ceff074b3b74 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/reboot.h>
+#include <linux/vmalloc.h>
#define DM_MSG_PREFIX "verity"
@@ -32,6 +33,7 @@
#define DM_VERITY_OPT_LOGGING "ignore_corruption"
#define DM_VERITY_OPT_RESTART "restart_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
+#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC)
@@ -399,6 +401,18 @@ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
}
/*
+ * Moves the bio iter one data block forward.
+ */
+static inline void verity_bv_skip_block(struct dm_verity *v,
+ struct dm_verity_io *io,
+ struct bvec_iter *iter)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+ bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
+}
+
+/*
* Verify one "dm_verity_io" structure.
*/
static int verity_verify_io(struct dm_verity_io *io)
@@ -410,9 +424,16 @@ static int verity_verify_io(struct dm_verity_io *io)
for (b = 0; b < io->n_blocks; b++) {
int r;
+ sector_t cur_block = io->block + b;
struct shash_desc *desc = verity_io_hash_desc(v, io);
- r = verity_hash_for_block(v, io, io->block + b,
+ if (v->validated_blocks &&
+ likely(test_bit(cur_block, v->validated_blocks))) {
+ verity_bv_skip_block(v, io, &io->iter);
+ continue;
+ }
+
+ r = verity_hash_for_block(v, io, cur_block,
verity_io_want_digest(v, io),
&is_zero);
if (unlikely(r < 0))
@@ -445,13 +466,16 @@ static int verity_verify_io(struct dm_verity_io *io)
return r;
if (likely(memcmp(verity_io_real_digest(v, io),
- verity_io_want_digest(v, io), v->digest_size) == 0))
+ verity_io_want_digest(v, io), v->digest_size) == 0)) {
+ if (v->validated_blocks)
+ set_bit(cur_block, v->validated_blocks);
continue;
+ }
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
- io->block + b, NULL, &start) == 0)
+ cur_block, NULL, &start) == 0)
continue;
else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
- io->block + b))
+ cur_block))
return -EIO;
}
@@ -645,6 +669,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
args += DM_VERITY_OPTS_FEC;
if (v->zero_digest)
args++;
+ if (v->validated_blocks)
+ args++;
if (!args)
return;
DMEMIT(" %u", args);
@@ -663,6 +689,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
}
if (v->zero_digest)
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
+ if (v->validated_blocks)
+ DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
sz = verity_fec_status_table(v, sz, result, maxlen);
break;
}
@@ -716,6 +744,7 @@ void verity_dtr(struct dm_target *ti)
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
+ vfree(v->validated_blocks);
kfree(v->salt);
kfree(v->root_digest);
kfree(v->zero_digest);
@@ -737,6 +766,26 @@ void verity_dtr(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(verity_dtr);
+static int verity_alloc_most_once(struct dm_verity *v)
+{
+ struct dm_target *ti = v->ti;
+
+ /* the bitset can only handle INT_MAX blocks */
+ if (v->data_blocks > INT_MAX) {
+ ti->error = "device too large to use check_at_most_once";
+ return -E2BIG;
+ }
+
+ v->validated_blocks = vzalloc(BITS_TO_LONGS(v->data_blocks) *
+ sizeof(unsigned long));
+ if (!v->validated_blocks) {
+ ti->error = "failed to allocate bitset for check_at_most_once";
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int verity_alloc_zero_digest(struct dm_verity *v)
{
int r = -ENOMEM;
@@ -806,6 +855,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
}
continue;
+ } else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+ r = verity_alloc_most_once(v);
+ if (r)
+ return r;
+ continue;
+
} else if (verity_is_fec_opt_arg(arg_name)) {
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
@@ -1074,7 +1129,7 @@ EXPORT_SYMBOL_GPL(verity_ctr);
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
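The check_at_most_once option added above trades memory for I/O: verity keeps one bit per data block in the vzalloc'd validated_blocks bitset, and any block whose bit is already set skips re-hashing on later reads. A rough sizing example, assuming 4 KiB data blocks:

	1 TiB data device  ->  2^40 / 2^12 = 2^28 data blocks
	bitset size        =   2^28 bits / 8 = 2^25 bytes = 32 MiB

The INT_MAX check in verity_alloc_most_once() keeps this allocation bounded.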
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index a90d1d416107..d216fc76d350 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -63,6 +63,7 @@ struct dm_verity {
sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
struct dm_verity_fec *fec; /* forward error correction */
+ unsigned long *validated_blocks; /* bitset blocks validated */
};
struct dm_verity_io {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f7f560f5f056..f002d2ce9c9f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -974,7 +974,8 @@ static void dec_pending(struct dm_io *io, int error)
} else {
/* done with normal IO or empty flush */
trace_block_bio_complete(md->queue, bio, io_error);
- bio->bi_error = io_error;
+ if (io_error)
+ bio->bi_error = io_error;
bio_endio(bio);
}
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 494d01d0e92a..a7a561af05c9 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -945,8 +945,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
lock_comm(cinfo);
ret = __sendmsg(cinfo, &cmsg);
- if (ret)
+ if (ret) {
+ unlock_comm(cinfo);
return ret;
+ }
cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1cd819202553..3a9685fe115c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1028,8 +1028,9 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
* (not needed for Linear and RAID0 as metadata doesn't
* record this size)
*/
- if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
- rdev->sectors = (2ULL << 32) - 2;
+ if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
+ sb->level >= 1)
+ rdev->sectors = (sector_t)(2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
@@ -1322,8 +1323,9 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
- if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
- num_sectors = (2ULL << 32) - 2;
+ if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
+ rdev->mddev->level >= 1)
+ num_sectors = (sector_t)(2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
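For reference, the (2ULL << 32) sector bound above follows directly from the existing comment ("4TB == 2^32 KB, or 2*2^32 sectors"):

	2^32 KiB = 2^42 bytes = 4 TiB
	4 TiB / 512 bytes per sector = 2^33 = 2 * 2^32 = (2ULL << 32) sectors

The added IS_ENABLED(CONFIG_LBDAF) test skips the clamp on 32-bit builds without large-block-device support, where sector_t is only 32 bits wide and the sector count could never reach that value anyway.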
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a67e1a36733f..45e7a47e5f7b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2698,6 +2698,11 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
+ /*
+ * In case freeze_array() is waiting for condition
+ * nr_pending == nr_queued + extra to be true.
+ */
+ wake_up(&conf->wait_barrier);
md_wakeup_thread(conf->mddev->thread);
} else {
if (test_bit(R10BIO_WriteError,
@@ -3633,6 +3638,7 @@ static int run(struct mddev *mddev)
if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
discard_supported = true;
+ first = 0;
}
if (mddev->queue) {
@@ -4039,6 +4045,7 @@ static int raid10_start_reshape(struct mddev *mddev)
diff = 0;
if (first || diff < min_offset_diff)
min_offset_diff = diff;
+ first = 0;
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 77403228e098..9284acea4f7b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
int i;
- local_irq_disable();
- spin_lock(conf->hash_locks);
+ spin_lock_irq(conf->hash_locks);
for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
spin_lock(&conf->device_lock);
@@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
int i;
spin_unlock(&conf->device_lock);
- for (i = NR_STRIPE_HASH_LOCKS; i; i--)
- spin_unlock(conf->hash_locks + i - 1);
- local_irq_enable();
+ for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
+ spin_unlock(conf->hash_locks + i);
+ spin_unlock_irq(conf->hash_locks);
}
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
@@ -726,12 +725,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
- local_irq_disable();
if (sh1 > sh2) {
- spin_lock(&sh2->stripe_lock);
+ spin_lock_irq(&sh2->stripe_lock);
spin_lock_nested(&sh1->stripe_lock, 1);
} else {
- spin_lock(&sh1->stripe_lock);
+ spin_lock_irq(&sh1->stripe_lock);
spin_lock_nested(&sh2->stripe_lock, 1);
}
}
@@ -739,8 +737,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
spin_unlock(&sh1->stripe_lock);
- spin_unlock(&sh2->stripe_lock);
- local_irq_enable();
+ spin_unlock_irq(&sh2->stripe_lock);
}
/* Only freshly new full stripe normal write stripe can be added to a batch list */
@@ -3372,9 +3369,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
BUG_ON(test_bit(R5_Wantread, &dev->flags));
BUG_ON(sh->batch_head);
+
+ /*
+ * In the raid6 case if the only non-uptodate disk is P
+ * then we already trusted P to compute the other failed
+ * drives. It is safe to compute rather than re-read P.
+ * In other cases we only compute blocks from failed
+ * devices, otherwise check/repair might fail to detect
+ * a real inconsistency.
+ */
+
if ((s->uptodate == disks - 1) &&
+ ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) ||
(s->failed && (disk_idx == s->failed_num[0] ||
- disk_idx == s->failed_num[1]))) {
+ disk_idx == s->failed_num[1])))) {
/* have disk failed, and we're requested to fetch it;
* do compute it
*/
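The locking changes at the top of this file replace open-coded local_irq_disable()/spin_lock() pairs with spin_lock_irq(), the idiomatic equivalent that keeps interrupt handling inside the lock primitive (which matters for lockdep and for PREEMPT_RT, where spinlocks may sleep and bare interrupt disabling is undesirable). A minimal before/after sketch with a placeholder lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Open-coded form: disable interrupts, then take the lock. */
static void take_open_coded(void)
{
	local_irq_disable();
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
	local_irq_enable();
}

/* Idiomatic equivalent: the IRQ state change is folded into the lock call. */
static void take_idiomatic(void)
{
	spin_lock_irq(&example_lock);
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);
}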
diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c
index 9c59f4306883..f5956402fc69 100644
--- a/drivers/media/common/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c
@@ -38,7 +38,7 @@ static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
#endif
/* lnb control */
-#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
+#if (FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)) && FE_SUPPORTED(PLL)
static int flexcop_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
@@ -68,7 +68,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe,
#endif
#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
-static int flexcop_sleep(struct dvb_frontend* fe)
+static int __maybe_unused flexcop_sleep(struct dvb_frontend* fe)
{
struct flexcop_device *fc = fe->dvb->priv;
if (fc->fe_sleep)
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index fb66184dc9b6..77cf211e842e 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -750,6 +750,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * b
goto exit;
}
+ /*
+ * The CAM may need some time to settle down, or there might be a
+ * race between the CAM asserting DA and our writing of HC after the
+ * last check for DA: the CAM can assert DA just after we checked DA
+ * but before we set HC. It might be a bug in the CAM (keeping the
+ * FR bit), the lower layer/HW communication might need a longer
+ * timeout, or the CAM may simply need more time internally; but it
+ * does happen in practice.
+ * Read the status from the HW again and handle DA the same way as
+ * in the previous check.
+ */
+ status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ if (status < 0)
+ goto exit;
+
+ if (status & (STATUSREG_DA | STATUSREG_RE)) {
+ if (status & STATUSREG_DA)
+ dvb_ca_en50221_thread_wakeup(ca);
+
+ status = -EAGAIN;
+ goto exit;
+ }
+
/* send the amount of data */
if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0)
goto exit;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index feeeb70d841e..d14d075ab1d6 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1281,11 +1281,12 @@ static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
* New users must use I2C client binding directly!
*/
struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
- struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+ struct i2c_adapter *i2c,
+ struct i2c_adapter **tuner_i2c_adapter)
{
struct i2c_client *client;
struct i2c_board_info board_info;
- struct m88ds3103_platform_data pdata;
+ struct m88ds3103_platform_data pdata = {};
pdata.clk = cfg->clock;
pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1428,6 +1429,8 @@ static int m88ds3103_probe(struct i2c_client *client,
case M88DS3103_CHIP_ID:
break;
default:
+ ret = -ENODEV;
+ dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
goto err_kfree;
}
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 821a8f481507..9d6270591858 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -14,6 +14,8 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
+
#include "si2168_priv.h"
static const struct dvb_frontend_ops si2168_ops;
@@ -420,6 +422,7 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
+ udelay(100);
memcpy(cmd.args, "\x85", 1);
cmd.wlen = 1;
cmd.rlen = 1;
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 7979e5d6498b..7ca359391535 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -369,7 +369,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
gain2 = clamp_t(long, gain2, 0, 13);
v_agc = clamp_t(long, v_agc, 400, 1100);
- *_gain = -(gain1 * 2330 +
+ *_gain = -((__s64)gain1 * 2330 +
gain2 * 3500 +
v_agc * 24 / 10 * 10 +
10000);
@@ -387,7 +387,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc,
gain3 = clamp_t(long, gain3, 0, 6);
v_agc = clamp_t(long, v_agc, 600, 1600);
- *_gain = -(gain1 * 2650 +
+ *_gain = -((__s64)gain1 * 2650 +
gain2 * 3380 +
gain3 * 2850 +
v_agc * 176 / 100 * 10 -
diff --git a/drivers/media/i2c/adv7481.c b/drivers/media/i2c/adv7481.c
index 7cac0a8abd81..e51fd653d8c9 100644
--- a/drivers/media/i2c/adv7481.c
+++ b/drivers/media/i2c/adv7481.c
@@ -41,25 +41,32 @@
#include "msm_camera_i2c.h"
#include "msm_camera_io_util.h"
#include "msm_camera_dt_util.h"
+#include "linux/hdmi.h"
#define DRIVER_NAME "adv7481"
-#define I2C_RW_DELAY 1
-#define I2C_SW_RST_DELAY 5000
+#define I2C_RW_DELAY 1
+#define I2C_SW_RST_DELAY 5000
#define GPIO_HW_RST_DELAY_HI 10000
#define GPIO_HW_RST_DELAY_LOW 10000
#define SDP_MIN_SLEEP 5000
#define SDP_MAX_SLEEP 6000
-#define SDP_NUM_TRIES 30
+#define SDP_NUM_TRIES 50
#define LOCK_MIN_SLEEP 5000
#define LOCK_MAX_SLEEP 6000
#define LOCK_NUM_TRIES 200
+#define MAX_DEFAULT_WIDTH 1280
+#define MAX_DEFAULT_HEIGHT 720
#define MAX_DEFAULT_FRAME_RATE 60
#define MAX_DEFAULT_PIX_CLK_HZ 74240000
-#define ONE_MHZ_TO_HZ 1000000
-#define I2C_BLOCK_WRITE_SIZE 1024
+#define ONE_MHZ_TO_HZ 1000000
+#define I2C_BLOCK_WRITE_SIZE 1024
+#define ADV_REG_STABLE_DELAY 70 /* ms */
+
+#define AVI_INFOFRAME_SIZE 31
+#define INFOFRAME_DATA_SIZE 28
enum adv7481_gpio_t {
@@ -117,6 +124,7 @@ struct adv7481_state {
uint8_t i2c_csi_txa_addr;
uint8_t i2c_csi_txb_addr;
uint8_t i2c_hdmi_addr;
+ uint8_t i2c_hdmi_inf_addr;
uint8_t i2c_edid_addr;
uint8_t i2c_cp_addr;
uint8_t i2c_sdp_addr;
@@ -138,6 +146,9 @@ struct adv7481_state {
int csib_src;
int mode;
+ /* AVI Infoframe Params */
+ struct avi_infoframe_params hdmi_avi_infoframe;
+
/* resolution configuration */
struct resolution_config res_configs[RES_MAX];
@@ -182,19 +193,20 @@ const uint8_t adv7481_default_edid_data[] = {
/* Display Parameters */
0x80, 0x10, 0x09, 0x78, 0x0A,
/* Color characteristics */
-0x0D, 0xC9, 0xA0, 0x57, 0x47, 0x98, 0x27, 0x12, 0x48, 0x4C,
+0x0D, 0xC9, 0xA0, 0x57, 0x47, 0x98, 0x27, 0x12,
+0x48, 0x4C,
/* Established Timings */
0x21, 0x08, 0x00,
/* Standard Timings */
-0x81, 0xC0, 0x81, 0x40, 0x3B, 0xC0, 0x3B, 0x40,
-0x31, 0xC0, 0x31, 0x40, 0x01, 0x01, 0x01, 0x01,
+0xD1, 0xC0, 0xD1, 0x40, 0x81, 0xC0, 0x81, 0x40,
+0x3B, 0xC0, 0x3B, 0x40, 0x31, 0xC0, 0x31, 0x40,
/* Detailed Timings Block */
-0x01, 0x1D, 0x00, 0xBC, 0x52, 0xD0, 0x1E, 0x20,
-0xB8, 0x28, 0x55, 0x40, 0xA0, 0x5A, 0x00, 0x00,
+0x1A, 0x36, 0x80, 0xA0, 0x70, 0x38, 0x1F, 0x40,
+0x30, 0x20, 0x35, 0x00, 0x40, 0x44, 0x21, 0x00,
0x00, 0x1E,
/* Monitor Descriptor Block 2 */
-0x8C, 0x0A, 0xD0, 0xB4, 0x20, 0xE0, 0x14, 0x10,
-0x12, 0x48, 0x3A, 0x00, 0xD8, 0xA2, 0x00, 0x00,
+0x00, 0x19, 0x00, 0xA0, 0x50, 0xD0, 0x15, 0x20,
+0x30, 0x20, 0x35, 0x00, 0x80, 0xD8, 0x10, 0x00,
0x00, 0x1E,
/* Monitor Descriptor Block 3 */
0x00, 0x00, 0x00, 0xFD, 0x00, 0x17, 0x4B, 0x0F,
@@ -207,16 +219,16 @@ const uint8_t adv7481_default_edid_data[] = {
/* Extension Flag CEA */
0x01,
/* Checksum */
-0x5B,
+0x16,
/* Block 1 (Extension Block) */
/* Extension Header */
-0x02, 0x03, 0x1E,
+0x02, 0x03, 0x22,
/* Display supports */
0x71,
-/* Video Data Bock */
-0x48, 0x84, 0x13, 0x3C, 0x03, 0x02, 0x11, 0x12,
-0x01,
+/* Video Data Block */
+0x4C, 0x84, 0x13, 0x3C, 0x03, 0x02, 0x11, 0x12,
+0x01, 0x90, 0x1F, 0x20, 0x22,
/* HDMI VSDB */
/* Deep color All, Max_TMDS_Clock = 150 MHz */
0x68, 0x03, 0x0C, 0x00, 0x10, 0x00, 0x80,
@@ -228,17 +240,17 @@ const uint8_t adv7481_default_edid_data[] = {
/* Speaker Allocation Data Block */
0x83, 0x01, 0x00, 0x00,
/* Detailed Timing Descriptor */
-0x01, 0x1D, 0x00, 0x72, 0x51, 0xD0, 0x1E, 0x20,
-0x6E, 0x28, 0x55, 0x00, 0xA0, 0x2A, 0x53, 0x00,
+0x1A, 0x36, 0x80, 0xA0, 0x70, 0x38, 0x1F, 0x40,
+0x30, 0x20, 0x35, 0x00, 0x40, 0x44, 0x21, 0x00,
0x00, 0x1E,
/* Detailed Timing Descriptor */
-0x8C, 0x0A, 0xD0, 0xB4, 0x20, 0xE0, 0x14, 0x10,
-0x12, 0x48, 0x3A, 0x00, 0xD8, 0xA2, 0x00, 0x00,
+0x00, 0x19, 0x00, 0xA0, 0x50, 0xD0, 0x15, 0x20,
+0x30, 0x20, 0x35, 0x00, 0x80, 0xD8, 0x10, 0x00,
0x00, 0x1E,
/* Detailed Timing Descriptor */
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00,
+0x41, 0x0A, 0xD0, 0xA0, 0x20, 0xE0, 0x13, 0x10,
+0x30, 0x20, 0x3A, 0x00, 0xD8, 0x90, 0x00, 0x00,
+0x00, 0x18,
/* Detailed Timing Descriptor */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -248,9 +260,9 @@ const uint8_t adv7481_default_edid_data[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
/* DTD padding */
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00,
/* Checksum */
-0xC6
+0x8C
};
#define ADV7481_EDID_SIZE ARRAY_SIZE(adv7481_default_edid_data)
@@ -301,6 +313,14 @@ static int32_t adv7481_cci_i2c_read(struct msm_camera_i2c_client *i2c_client,
data, data_type);
}
+static int32_t adv7481_cci_i2c_read_seq(
+ struct msm_camera_i2c_client *i2c_client,
+ uint8_t reg, uint8_t *data, uint32_t size)
+{
+ return i2c_client->i2c_func_tbl->i2c_read_seq(i2c_client, reg,
+ data, size);
+}
+
static int32_t adv7481_wr_byte(struct msm_camera_i2c_client *c_i2c_client,
uint8_t sid, uint8_t reg, uint8_t data)
{
@@ -331,6 +351,20 @@ static int32_t adv7481_wr_block(struct msm_camera_i2c_client *c_i2c_client,
return ret;
}
+static int32_t adv7481_rd_block(struct msm_camera_i2c_client *c_i2c_client,
+ uint8_t sid, uint8_t reg, uint8_t *data, uint32_t size)
+{
+ int ret = 0;
+
+ c_i2c_client->cci_client->sid = sid;
+
+ ret = adv7481_cci_i2c_read_seq(c_i2c_client, reg, data, size);
+ if (ret < 0)
+ pr_err("Error %d reading cci i2c block data\n", ret);
+
+ return ret;
+}
+
static uint8_t adv7481_rd_byte(struct msm_camera_i2c_client *c_i2c_client,
uint8_t sid, uint8_t reg)
{
@@ -389,6 +423,7 @@ static int adv7481_set_irq(struct adv7481_state *state)
ADV_REG_SETFIELD(1, IO_CP_UNLOCK_CP_MB1) |
ADV_REG_SETFIELD(1, IO_VMUTE_REQUEST_HDMI_MB1) |
ADV_REG_SETFIELD(1, IO_INT_SD_MB1));
+
/* Set cable detect */
ret |= adv7481_wr_byte(&state->i2c_client, state->i2c_io_addr,
IO_HDMI_LVL_INT_MASKB_3_ADDR,
@@ -476,7 +511,7 @@ static irqreturn_t adv7481_irq(int irq, void *dev)
struct adv7481_state *state = dev;
schedule_delayed_work(&state->irq_delayed_work,
- msecs_to_jiffies(0));
+ msecs_to_jiffies(ADV_REG_STABLE_DELAY));
return IRQ_HANDLED;
}
@@ -578,7 +613,8 @@ static void adv7481_irq_delay_work(struct work_struct *work)
pr_debug("%s: dev: %d got datapath raw status: 0x%x\n",
__func__, state->device_num, raw_status);
- if (ADV_REG_GETFIELD(int_status, IO_INT_SD_ST) &&
+ if ((state->mode == ADV7481_IP_CVBS_1) &&
+ ADV_REG_GETFIELD(int_status, IO_INT_SD_ST) &&
ADV_REG_GETFIELD(raw_status, IO_INT_SD_RAW)) {
uint8_t sdp_sts = 0;
@@ -614,7 +650,7 @@ static void adv7481_irq_delay_work(struct work_struct *work)
adv7481_wr_byte(&state->i2c_client,
state->i2c_sdp_addr, SDP_RW_MAP_REG,
0x00);
- } else {
+ } else if (state->mode == ADV7481_IP_HDMI) {
if (ADV_REG_GETFIELD(int_status,
IO_CP_LOCK_CP_ST) &&
ADV_REG_GETFIELD(raw_status,
@@ -741,18 +777,12 @@ static int adv7481_dev_init(struct adv7481_state *state)
mutex_lock(&state->mutex);
- /* Soft reset */
- ret = adv7481_wr_byte(&state->i2c_client, state->i2c_io_addr,
- IO_REG_MAIN_RST_ADDR, IO_REG_MAIN_RST_VALUE);
- /* Delay required following I2C reset and I2C transactions */
- udelay(I2C_SW_RST_DELAY);
-
chip_rev_id = adv7481_rd_word(&state->i2c_client, state->i2c_io_addr,
IO_REG_CHIP_REV_ID_1_ADDR);
pr_debug("%s: ADV7481 chip rev id: 0x%x", __func__, chip_rev_id);
/* Disable CEC wake up in power-down mode */
- ret |= adv7481_cec_wakeup(state, 0);
+ ret = adv7481_cec_wakeup(state, 0);
/* Setting Vid_Std to 720x480p60 */
ret |= adv7481_wr_byte(&state->i2c_client, state->i2c_io_addr,
IO_REG_CP_VID_STD_ADDR, 0x4A);
@@ -804,6 +834,7 @@ static int adv7481_dev_init(struct adv7481_state *state)
state->i2c_csi_txb_addr = IO_REG_CSI_TXB_SADDR >> 1;
state->i2c_cp_addr = IO_REG_CP_SADDR >> 1;
state->i2c_hdmi_addr = IO_REG_HDMI_SADDR >> 1;
+ state->i2c_hdmi_inf_addr = IO_REG_HDMI_INF_SADDR >> 1;
state->i2c_edid_addr = IO_REG_EDID_SADDR >> 1;
state->i2c_sdp_addr = IO_REG_SDP_SADDR >> 1;
state->i2c_rep_addr = IO_REG_HDMI_REP_SADDR >> 1;
@@ -1038,10 +1069,16 @@ static long adv7481_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
struct adv7481_vid_params vid_params;
struct adv7481_hdmi_params hdmi_params;
+ struct device *dev = state->dev;
+ union hdmi_infoframe hdmi_info_frame;
+ uint8_t inf_buffer[AVI_INFOFRAME_SIZE];
+
pr_debug("Enter %s with command: 0x%x", __func__, cmd);
memset(&vid_params, 0, sizeof(struct adv7481_vid_params));
memset(&hdmi_params, 0, sizeof(struct adv7481_hdmi_params));
+ memset(&hdmi_info_frame, 0, sizeof(union hdmi_infoframe));
+ memset(inf_buffer, 0, AVI_INFOFRAME_SIZE);
if (!sd)
return -EINVAL;
@@ -1094,6 +1131,58 @@ static long adv7481_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
}
break;
}
+ case VIDIOC_G_AVI_INFOFRAME: {
+ int int_raw = adv7481_rd_byte(&state->i2c_client,
+ state->i2c_io_addr,
+ IO_HDMI_EDG_RAW_STATUS_1_ADDR);
+ adv7481_wr_byte(&state->i2c_client,
+ state->i2c_io_addr,
+ IO_HDMI_EDG_INT_CLEAR_1_ADDR, int_raw);
+ if (ADV_REG_GETFIELD(int_raw, IO_NEW_AVI_INFO_RAW)) {
+ inf_buffer[0] = adv7481_rd_byte(&state->i2c_client,
+ state->i2c_hdmi_inf_addr,
+ HDMI_REG_AVI_PACKET_ID_ADDR);
+ inf_buffer[1] = adv7481_rd_byte(&state->i2c_client,
+ state->i2c_hdmi_inf_addr,
+ HDMI_REG_AVI_INF_VERS_ADDR);
+ inf_buffer[2] = adv7481_rd_byte(&state->i2c_client,
+ state->i2c_hdmi_inf_addr,
+ HDMI_REG_AVI_INF_LEN_ADDR);
+ ret = adv7481_rd_block(&state->i2c_client,
+ state->i2c_hdmi_inf_addr,
+ HDMI_REG_AVI_INF_PB_ADDR,
+ &inf_buffer[3],
+ INFOFRAME_DATA_SIZE);
+ if (ret) {
+ pr_err("%s:Error in VIDIOC_G_AVI_INFOFRAME\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (hdmi_infoframe_unpack(&hdmi_info_frame,
+ (void *)inf_buffer) < 0) {
+ pr_err("%s: infoframe unpack fail\n", __func__);
+ return -EINVAL;
+ }
+ hdmi_infoframe_log(KERN_ERR, dev, &hdmi_info_frame);
+ state->hdmi_avi_infoframe.picture_aspect =
+ (enum picture_aspect_ratio)
+ hdmi_info_frame.avi.picture_aspect;
+ state->hdmi_avi_infoframe.active_aspect =
+ (enum active_format_aspect_ratio)
+ hdmi_info_frame.avi.active_aspect;
+ state->hdmi_avi_infoframe.video_code =
+ hdmi_info_frame.avi.video_code;
+ } else {
+ pr_err("%s: No new AVI Infoframe\n", __func__);
+ }
+ if (copy_to_user((void __user *)adv_arg.ptr,
+ (void *)&state->hdmi_avi_infoframe,
+ sizeof(struct avi_infoframe_params))) {
+ pr_err("%s: Failed to copy Infoframe\n", __func__);
+ return -EINVAL;
+ }
+ break;
+ }
case VIDIOC_G_FIELD_INFO:
/* Select SDP read-only Map 1 */
adv7481_wr_byte(&state->i2c_client, state->i2c_sdp_addr,
@@ -1124,10 +1213,12 @@ static long adv7481_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
return ret;
}
-static int adv7481_get_sd_timings(struct adv7481_state *state, int *sd_standard)
+static int adv7481_get_sd_timings(struct adv7481_state *state,
+ int *sd_standard, struct adv7481_vid_params *vid_params)
{
int ret = 0;
int sdp_stat, sdp_stat2;
+ int interlace_reg = 0;
int timeout = 0;
if (sd_standard == NULL)
@@ -1144,6 +1235,25 @@ static int adv7481_get_sd_timings(struct adv7481_state *state, int *sd_standard)
sdp_stat2 = adv7481_rd_byte(&state->i2c_client,
state->i2c_sdp_addr, SDP_RO_MAIN_STATUS1_ADDR);
} while ((sdp_stat != sdp_stat2) && (timeout < SDP_NUM_TRIES));
+
+ interlace_reg = adv7481_rd_byte(&state->i2c_client,
+ state->i2c_sdp_addr, SDP_RO_MAIN_INTERLACE_STATE_ADDR);
+
+ if (ADV_REG_GETFIELD(interlace_reg, SDP_RO_MAIN_INTERLACE_STATE))
+ pr_debug("%s: Interlaced video detected\n", __func__);
+ else
+ pr_debug("%s: Interlaced video not detected\n", __func__);
+
+ if (ADV_REG_GETFIELD(interlace_reg, SDP_RO_MAIN_FIELD_LEN))
+ pr_debug("%s: Field length is correct\n", __func__);
+ else
+ pr_debug("%s: Field length is not correct\n", __func__);
+
+ if (ADV_REG_GETFIELD(interlace_reg, SDP_RO_MAIN_SD_FIELD_RATE))
+ pr_debug("%s: SD 50 Hz detected\n", __func__);
+ else
+ pr_debug("%s: SD 60 Hz detected\n", __func__);
+
adv7481_wr_byte(&state->i2c_client, state->i2c_sdp_addr,
SDP_RW_MAP_REG, 0x00);
@@ -1157,36 +1267,58 @@ static int adv7481_get_sd_timings(struct adv7481_state *state, int *sd_standard)
__func__, __LINE__, sdp_stat);
return -EBUSY;
}
+ vid_params->act_pix = 720;
+ vid_params->intrlcd = 1;
switch (ADV_REG_GETFIELD(sdp_stat, SDP_RO_MAIN_AD_RESULT)) {
case AD_NTSM_M_J:
*sd_standard = V4L2_STD_NTSC;
+ pr_debug("%s, V4L2_STD_NTSC\n", __func__);
+ vid_params->act_lines = 507;
break;
case AD_NTSC_4_43:
*sd_standard = V4L2_STD_NTSC_443;
+ pr_debug("%s, V4L2_STD_NTSC_443\n", __func__);
+ vid_params->act_lines = 507;
break;
case AD_PAL_M:
*sd_standard = V4L2_STD_PAL_M;
+ pr_debug("%s, V4L2_STD_PAL_M\n", __func__);
+ vid_params->act_lines = 576;
break;
case AD_PAL_60:
*sd_standard = V4L2_STD_PAL_60;
+ pr_debug("%s, V4L2_STD_PAL_60\n", __func__);
+ vid_params->act_lines = 576;
break;
case AD_PAL_B_G:
*sd_standard = V4L2_STD_PAL;
+ pr_debug("%s, V4L2_STD_PAL\n", __func__);
+ vid_params->act_lines = 576;
break;
case AD_SECAM:
*sd_standard = V4L2_STD_SECAM;
+ pr_debug("%s, V4L2_STD_SECAM\n", __func__);
+ vid_params->act_lines = 576;
break;
case AD_PAL_COMB_N:
*sd_standard = V4L2_STD_PAL_Nc | V4L2_STD_PAL_N;
+ pr_debug("%s, V4L2_STD_PAL_Nc | V4L2_STD_PAL_N\n", __func__);
+ vid_params->act_lines = 576;
break;
case AD_SECAM_525:
*sd_standard = V4L2_STD_SECAM;
+ pr_debug("%s, V4L2_STD_SECAM (AD_SECAM_525)\n", __func__);
+ vid_params->act_lines = 576;
break;
default:
*sd_standard = V4L2_STD_UNKNOWN;
+ pr_debug("%s, V4L2_STD_UNKNOWN\n", __func__);
+ vid_params->act_lines = 507;
break;
}
+ pr_debug("%s(%d), adv7481 TMDS Resolution: %d x %d\n",
+ __func__, __LINE__, vid_params->act_pix, vid_params->act_lines);
return ret;
}
@@ -1710,7 +1842,8 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state,
} else {
pr_err("%s(%d): PLL not locked return EBUSY\n",
__func__, __LINE__);
- return -EBUSY;
+ ret = -EBUSY;
+ goto set_default;
}
/* Check Timing Lock */
@@ -1804,6 +1937,8 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state,
vid_params->pix_clk = hdmi_params->tmds_freq;
+ vid_params->act_lines = vid_params->act_lines * fieldfactor;
+
switch (hdmi_params->color_depth) {
case CD_10BIT:
vid_params->pix_clk = ((vid_params->pix_clk*4)/5);
@@ -1830,6 +1965,17 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state,
(hdmi_params->pix_rep + 1));
}
+set_default:
+ if (ret) {
+ pr_debug("%s(%d), error %d resort to default fmt\n",
+ __func__, __LINE__, ret);
+ vid_params->act_pix = MAX_DEFAULT_WIDTH;
+ vid_params->act_lines = MAX_DEFAULT_HEIGHT;
+ vid_params->fr_rate = MAX_DEFAULT_FRAME_RATE;
+ vid_params->pix_clk = MAX_DEFAULT_PIX_CLK_HZ;
+ vid_params->intrlcd = 0;
+ ret = 0;
+ }
pr_debug("%s(%d), adv7481 TMDS Resolution: %d x %d @ %d fps\n",
__func__, __LINE__,
@@ -1893,6 +2039,9 @@ static int adv7481_query_sd_std(struct v4l2_subdev *sd, v4l2_std_id *std)
struct adv7481_state *state = to_state(sd);
uint8_t tStatus = 0x0;
uint32_t count = 0;
+ struct adv7481_vid_params vid_params;
+
+ memset(&vid_params, 0, sizeof(vid_params));
pr_debug("Enter %s\n", __func__);
/* Select SDP read-only main Map */
@@ -1933,7 +2082,7 @@ static int adv7481_query_sd_std(struct v4l2_subdev *sd, v4l2_std_id *std)
case ADV7481_IP_CVBS_6_HDMI_SIM:
case ADV7481_IP_CVBS_7_HDMI_SIM:
case ADV7481_IP_CVBS_8_HDMI_SIM:
- ret = adv7481_get_sd_timings(state, &temp);
+ ret = adv7481_get_sd_timings(state, &temp, &vid_params);
break;
default:
return -EINVAL;
@@ -1963,6 +2112,7 @@ static int adv7481_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
int ret;
+ int sd_standard;
struct adv7481_vid_params vid_params;
struct adv7481_hdmi_params hdmi_params;
struct adv7481_state *state = to_state(sd);
@@ -1987,8 +2137,9 @@ static int adv7481_get_fmt(struct v4l2_subdev *sd,
if (!ret) {
fmt->width = vid_params.act_pix;
fmt->height = vid_params.act_lines;
+ fmt->field = V4L2_FIELD_NONE;
if (vid_params.intrlcd)
- fmt->height /= 2;
+ fmt->field = V4L2_FIELD_INTERLACED;
} else {
pr_err("%s: Error %d in adv7481_get_hdmi_timings\n",
__func__, ret);
@@ -1997,8 +2148,14 @@ static int adv7481_get_fmt(struct v4l2_subdev *sd,
case ADV7481_IP_CVBS_1:
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
- fmt->width = 720;
- fmt->height = 576;
+ ret = adv7481_get_sd_timings(state, &sd_standard, &vid_params);
+ if (!ret) {
+ fmt->width = vid_params.act_pix;
+ fmt->height = vid_params.act_lines;
+ fmt->field = V4L2_FIELD_INTERLACED;
+ } else {
+ pr_err("%s: Unable to get sd_timings\n", __func__);
+ }
break;
default:
return -EINVAL;
@@ -2316,7 +2473,12 @@ static int adv7481_init_v4l2_controls(struct adv7481_state *state)
{
int ret = 0;
- v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
+ ret = v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
+ if (ret) {
+ pr_err("%s: v4l2_ctrl_handler_init failed, ret: %d\n",
+ __func__, ret);
+ return ret;
+ }
v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7481_ctrl_ops,
V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
@@ -2333,7 +2495,10 @@ static int adv7481_init_v4l2_controls(struct adv7481_state *state)
v4l2_ctrl_handler_free(&state->ctrl_hdl);
} else {
- v4l2_ctrl_handler_setup(&state->ctrl_hdl);
+ ret = v4l2_ctrl_handler_setup(&state->ctrl_hdl);
+ if (ret)
+ pr_err("%s: v4l2_ctrl_handler_init failed, ret: %d\n",
+ __func__, ret);
}
pr_err("%s: Exit with ret: %d\n", __func__, ret);
@@ -2573,11 +2738,22 @@ static int adv7481_probe(struct platform_device *pdev)
goto err_media_entity;
}
+ /* Soft reset */
+ ret = adv7481_wr_byte(&state->i2c_client, state->i2c_io_addr,
+ IO_REG_MAIN_RST_ADDR, IO_REG_MAIN_RST_VALUE);
+ if (ret) {
+ pr_err("%s: Failed Soft reset %d\n", __func__, ret);
+ goto err_media_entity;
+ }
+ /* Delay required following I2C reset and I2C transactions */
+ udelay(I2C_SW_RST_DELAY);
+
/* Register V4l2 Control Functions */
ret = adv7481_init_v4l2_controls(state);
if (ret) {
pr_err("%s: V4L2 Controls Initialisation Failed %d\n",
__func__, ret);
+ goto err_media_entity;
}
/* Initial ADV7481 State Settings */
@@ -2600,7 +2776,7 @@ static int adv7481_probe(struct platform_device *pdev)
goto err_media_entity;
}
enable_irq(state->irq);
- pr_debug("Probe successful!\n");
+ pr_info("ADV7481 Probe successful!\n");
return ret;
diff --git a/drivers/media/i2c/adv7481_reg.h b/drivers/media/i2c/adv7481_reg.h
index 76c992cf4394..403e538b6127 100644
--- a/drivers/media/i2c/adv7481_reg.h
+++ b/drivers/media/i2c/adv7481_reg.h
@@ -342,6 +342,12 @@
#define HDMI_EDID_A_ENABLE_BMSK 0x0001
#define HDMI_EDID_A_ENABLE_SHFT 0
+/* HDMI RX INFOFRAME Map Registers (Read Only) */
+#define HDMI_REG_AVI_INF_PB_ADDR 0x00
+#define HDMI_REG_AVI_PACKET_ID_ADDR 0xE0
+#define HDMI_REG_AVI_INF_VERS_ADDR 0xE1
+#define HDMI_REG_AVI_INF_LEN_ADDR 0xE2
+
/* CEC Map Registers */
#define CEC_REG_LOG_ADDR_MASK_ADDR 0x27
#define CEC_REG_LOG_ADDR_MASK2_BMSK 0x0040
@@ -410,6 +416,13 @@
#define SDP_RO_MAIN_LOST_LOCK_SHFT 1
#define SDP_RO_MAIN_IN_LOCK_BMSK 0x0001
#define SDP_RO_MAIN_IN_LOCK_SHFT 0
+#define SDP_RO_MAIN_INTERLACE_STATE_ADDR 0x13
+#define SDP_RO_MAIN_INTERLACE_STATE_BMSK 0x0040
+#define SDP_RO_MAIN_INTERLACE_STATE_SHFT 6
+#define SDP_RO_MAIN_FIELD_LEN_BMSK 0x0020
+#define SDP_RO_MAIN_FIELD_LEN_SHFT 5
+#define SDP_RO_MAIN_SD_FIELD_RATE_BMSK 0x0004
+#define SDP_RO_MAIN_SD_FIELD_RATE_SHFT 2
/* SDP R/O Map 1 Registers */
#define SDP_RO_MAP_1_FIELD_ADDR 0x45
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index fe6eb78b6914..a47ab1947cc4 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* 6. */
cx25840_write(client, 0x115, 0x8c);
@@ -631,11 +633,13 @@ static void cx23885_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
@@ -746,11 +750,13 @@ static void cx231xx_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
cx25840_std_setup(client);
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index d0ad6a25bdab..5ac2babe123b 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
/**
* s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
*
* Configure the internal ISP PLL for the required output frequency.
* Locking: called with s5k6aa.lock mutex held.
@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
/**
* s5k6aa_configure_video_bus - configure the video output interface
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
* @bus_type: video bus type: parallel or MIPI-CSI
* @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
*
@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
/**
* s5k6aa_set_prev_config - write user preview register set
+ * @s5k6aa: pointer to &struct s5k6aa describing the device
+ * @preset: s5kaa preset to be applied
*
 * Configure output resolution and color format, pixel clock
* frequency range, device frame rate type and frame period range.
@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
/**
* s5k6aa_initialize_isp - basic ISP MCU initialization
+ * @sd: pointer to V4L2 sub-device descriptor
*
* Configure AHB addresses for registers read/write; configure PLLs for
* required output pixel clock. The ISP power supply needs to be already
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 1f8af1ee8352..1e4783b51a35 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client,
priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
priv->colorspace = V4L2_COLORSPACE_JPEG;
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
+ priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
goto eclkget;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 9ef5baaf8646..bc630a719776 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -197,57 +197,61 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
}
}
-static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg)
+static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n)
{
- u8 val;
+ __le32 val = 0;
- i2c_rd(sd, reg, &val, 1);
+ i2c_rd(sd, reg, (u8 __force *)&val, n);
- return val;
+ return le32_to_cpu(val);
+}
+
+static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n)
+{
+ __le32 raw = cpu_to_le32(val);
+
+ i2c_wr(sd, reg, (u8 __force *)&raw, n);
+}
+
+static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg)
+{
+ return i2c_rdreg(sd, reg, 1);
}
static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
{
- i2c_wr(sd, reg, &val, 1);
+ i2c_wrreg(sd, reg, val, 1);
}
static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
u8 mask, u8 val)
{
- i2c_wr8(sd, reg, (i2c_rd8(sd, reg) & mask) | val);
+ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
}
static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
{
- u16 val;
-
- i2c_rd(sd, reg, (u8 *)&val, 2);
-
- return val;
+ return i2c_rdreg(sd, reg, 2);
}
static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
{
- i2c_wr(sd, reg, (u8 *)&val, 2);
+ i2c_wrreg(sd, reg, val, 2);
}
static void i2c_wr16_and_or(struct v4l2_subdev *sd, u16 reg, u16 mask, u16 val)
{
- i2c_wr16(sd, reg, (i2c_rd16(sd, reg) & mask) | val);
+ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
}
static u32 i2c_rd32(struct v4l2_subdev *sd, u16 reg)
{
- u32 val;
-
- i2c_rd(sd, reg, (u8 *)&val, 4);
-
- return val;
+ return i2c_rdreg(sd, reg, 4);
}
static void i2c_wr32(struct v4l2_subdev *sd, u16 reg, u32 val)
{
- i2c_wr(sd, reg, (u8 *)&val, 4);
+ i2c_wrreg(sd, reg, val, 4);
}
/* --------------- STATUS --------------- */
@@ -1240,7 +1244,7 @@ static int tc358743_g_register(struct v4l2_subdev *sd,
reg->size = tc358743_get_reg_size(reg->reg);
- i2c_rd(sd, reg->reg, (u8 *)&reg->val, reg->size);
+ reg->val = i2c_rdreg(sd, reg->reg, reg->size);
return 0;
}
@@ -1266,7 +1270,7 @@ static int tc358743_s_register(struct v4l2_subdev *sd,
reg->reg == BCAPS)
return 0;
- i2c_wr(sd, (u16)reg->reg, (u8 *)&reg->val,
+ i2c_wrreg(sd, (u16)reg->reg, reg->val,
tc358743_get_reg_size(reg->reg));
return 0;
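The new i2c_rdreg()/i2c_wrreg() helpers above centralize the byte-order handling: register bytes are read into a __le32 and converted with le32_to_cpu() instead of being cast into a native u16/u32, which only happens to work on little-endian hosts. A small sketch of just the conversion step, assuming the raw bytes have already been read from the device:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Interpret up to four little-endian register bytes as a CPU-order value. */
static u32 regval_from_le_bytes(const u8 *bytes, u32 n)
{
	__le32 raw = 0;

	memcpy(&raw, bytes, min_t(u32, n, sizeof(raw)));
	return le32_to_cpu(raw);
}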
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 8aa726651630..90fcccc05b56 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
bt878_num);
if (bt878_num >= BT878_MAX) {
printk(KERN_ERR "bt878: Too many devices inserted\n");
- result = -ENOMEM;
- goto fail0;
+ return -ENOMEM;
}
if (pci_enable_device(dev))
return -EIO;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index f7ce493b1fee..a0b61e88c838 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -342,6 +342,17 @@ static void solo_stop_streaming(struct vb2_queue *q)
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
solo_stop_thread(solo_dev);
+
+ spin_lock(&solo_dev->slock);
+ while (!list_empty(&solo_dev->vidq_active)) {
+ struct solo_vb2_buf *buf = list_entry(
+ solo_dev->vidq_active.next,
+ struct solo_vb2_buf, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock(&solo_dev->slock);
INIT_LIST_HEAD(&solo_dev->vidq_active);
}
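For the solo6x10 change above: videobuf2 expects stop_streaming() to give back every buffer the driver still holds, otherwise the core complains about buffers left in flight. The added loop therefore walks vidq_active under the driver spinlock and completes each entry with VB2_BUF_STATE_ERROR before the list head is reinitialised. A reduced sketch of that drain-under-lock pattern, using a plain mutex and invented types rather than the driver's list_head/vb2 machinery:

#include <pthread.h>

/* Simplified stand-ins for the driver's queued-buffer bookkeeping. */
struct buf {
	struct buf *next;
	int state;              /* 0 = queued, -1 = completed with error */
};

struct dev {
	pthread_mutex_t lock;   /* plays the role of solo_dev->slock     */
	struct buf *active;     /* linked list of still-queued buffers   */
};

/* Drain every still-queued buffer and mark it as errored, the way
 * stop_streaming() above returns buffers with VB2_BUF_STATE_ERROR.
 */
static void stop_streaming_drain(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->active) {
		struct buf *b = d->active;

		d->active = b->next;    /* unlink, like list_del()       */
		b->state = -1;          /* hand back to the owner as error */
	}
	pthread_mutex_unlock(&d->lock);
}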
diff --git a/drivers/media/platform/msm/ais/Makefile b/drivers/media/platform/msm/ais/Makefile
index 4387b96f01d0..8c596dfcdcd8 100644
--- a/drivers/media/platform/msm/ais/Makefile
+++ b/drivers/media/platform/msm/ais/Makefile
@@ -22,4 +22,5 @@ obj-$(CONFIG_MSM_AIS_JPEG) += jpeg_10/
obj-$(CONFIG_MSM_AIS_JPEGDMA) += jpeg_dma/
obj-$(CONFIG_MSM_AIS) += msm_buf_mgr/
obj-$(CONFIG_MSM_AIS) += msm_ais_mgr/
+obj-$(CONFIG_MSM_AIS) += msm_ais_diag/
obj-$(CONFIG_MSM_AIS_FD) += fd/
diff --git a/drivers/media/platform/msm/ais/camera/camera.c b/drivers/media/platform/msm/ais/camera/camera.c
index afba7386a82b..353b74794bc8 100644
--- a/drivers/media/platform/msm/ais/camera/camera.c
+++ b/drivers/media/platform/msm/ais/camera/camera.c
@@ -550,7 +550,7 @@ static int camera_v4l2_fh_open(struct file *filep)
{
struct msm_video_device *pvdev = video_drvdata(filep);
struct camera_v4l2_private *sp;
- unsigned int stream_id;
+ unsigned long stream_id;
sp = kzalloc(sizeof(*sp), GFP_KERNEL);
if (!sp)
@@ -627,7 +627,7 @@ static int camera_v4l2_open(struct file *filep)
int rc = 0;
struct v4l2_event event;
struct msm_video_device *pvdev = video_drvdata(filep);
- unsigned int opn_idx, idx;
+ unsigned long opn_idx, idx;
if (WARN_ON(!pvdev))
return -EIO;
diff --git a/drivers/media/platform/msm/ais/common/Makefile b/drivers/media/platform/msm/ais/common/Makefile
index e1fa3f2ea848..1849d9c9af4c 100644
--- a/drivers/media/platform/msm/ais/common/Makefile
+++ b/drivers/media/platform/msm/ais/common/Makefile
@@ -1,2 +1,2 @@
ccflags-y += -Idrivers/media/platform/msm/ais/
-obj-$(CONFIG_MSM_AIS) += msm_camera_io_util.o cam_smmu_api.o cam_hw_ops.o cam_soc_api.o
+obj-$(CONFIG_MSM_AIS) += msm_camera_io_util.o cam_smmu_api.o cam_hw_ops.o cam_soc_api.o msm_camera_diag_util.o
diff --git a/drivers/media/platform/msm/ais/common/cam_hw_ops.c b/drivers/media/platform/msm/ais/common/cam_hw_ops.c
index cf28e0ca6536..9110c88f9d8a 100644
--- a/drivers/media/platform/msm/ais/common/cam_hw_ops.c
+++ b/drivers/media/platform/msm/ais/common/cam_hw_ops.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/pm_opp.h>
#include <linux/regulator/rpm-smd-regulator.h>
#include "cam_hw_ops.h"
+#include "msm_camera_diag_util.h"
#ifdef CONFIG_CAM_AHB_DBG
#define CDBG(fmt, args...) pr_err(fmt, ##args)
@@ -242,6 +243,8 @@ static int cam_consolidate_ahb_vote(enum cam_ahb_clk_client id,
data.ahb_clk_state = max;
CDBG("dbg: state : %u, vector : %d\n",
data.ahb_clk_state, max);
+
+ msm_camera_diag_update_ahb_state(data.ahb_clk_state);
}
} else {
pr_err("err: no bus vector found\n");
diff --git a/drivers/media/platform/msm/ais/common/cam_hw_ops.h b/drivers/media/platform/msm/ais/common/cam_hw_ops.h
index 32f93f7b6e0e..e3e9f1381ad8 100644
--- a/drivers/media/platform/msm/ais/common/cam_hw_ops.h
+++ b/drivers/media/platform/msm/ais/common/cam_hw_ops.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,16 +12,7 @@
#ifndef _CAM_HW_OPS_H_
#define _CAM_HW_OPS_H_
-enum cam_ahb_clk_vote {
- /* need to update the voting requests
- * according to dtsi entries.
- */
- CAM_AHB_SUSPEND_VOTE = 0x0,
- CAM_AHB_SVS_VOTE = 0x01,
- CAM_AHB_NOMINAL_VOTE = 0x02,
- CAM_AHB_TURBO_VOTE = 0x03,
- CAM_AHB_DYNAMIC_VOTE = 0xFF,
-};
+#include <media/ais/msm_ais_mgr.h>
enum cam_ahb_clk_client {
CAM_AHB_CLIENT_CSIPHY,
diff --git a/drivers/media/platform/msm/ais/common/cam_soc_api.c b/drivers/media/platform/msm/ais/common/cam_soc_api.c
index 92f3e4007390..520940c74d69 100644
--- a/drivers/media/platform/msm/ais/common/cam_soc_api.c
+++ b/drivers/media/platform/msm/ais/common/cam_soc_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include "cam_soc_api.h"
+#include "msm_camera_diag_util.h"
struct msm_cam_bus_pscale_data {
struct msm_bus_scale_pdata *pdata;
@@ -374,6 +375,8 @@ int msm_camera_clk_enable(struct device *dev,
if (clk_rate == 0) {
clk_rate =
clk_round_rate(clk_ptr[i], 0);
+
+
if (clk_rate < 0) {
pr_err("%s round rate failed\n",
clk_info[i].clk_name);
@@ -410,6 +413,8 @@ int msm_camera_clk_enable(struct device *dev,
}
}
}
+
+ msm_camera_diag_update_clklist(clk_info, clk_ptr, num_clk, enable);
return rc;
cam_clk_enable_err:
diff --git a/drivers/media/platform/msm/ais/common/msm_camera_diag_util.c b/drivers/media/platform/msm/ais/common/msm_camera_diag_util.c
new file mode 100644
index 000000000000..d4d417090210
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/msm_camera_diag_util.c
@@ -0,0 +1,364 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_camera_diag_util.h"
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include "msm_camera_io_util.h"
+#include "msm.h"
+
+
+#define MAX_CLK_NUM 100
+struct camera_diag_clk_list {
+ struct msm_ais_diag_clk_info_t *clk_infolist;
+ struct clk **ppclk;
+ uint32_t clk_num;
+ uint32_t clk_capacity;
+ struct mutex lock;
+};
+
+struct camera_diag_ddrbw {
+ struct msm_ais_diag_bus_info_t bus_info;
+ struct mutex lock;
+};
+
+static struct camera_diag_clk_list s_diag_clk_list;
+static struct camera_diag_ddrbw s_ddrbw;
+
+int msm_camera_get_reg_list(void __iomem *base,
+ struct msm_camera_reg_list_cmd *reg_list)
+{
+ int rc = 0;
+ uint32_t i;
+ uint32_t *reg_values = NULL;
+ uint32_t addrs_size = sizeof(uint32_t) * reg_list->reg_num;
+ uint32_t *reg_addrs = kzalloc(addrs_size, GFP_KERNEL);
+
+ if (!reg_addrs) {
+ rc = -ENOMEM;
+ goto alloc_addr_failed;
+ }
+
+ if (copy_from_user(reg_addrs,
+ (void __user *)(reg_list->regaddr_list),
+ sizeof(uint32_t) * reg_list->reg_num)) {
+ rc = -EFAULT;
+ pr_err("%s copy_from_user fail\n", __func__);
+ goto copy_addr_failed;
+ }
+
+ reg_values = kzalloc(addrs_size, GFP_KERNEL);
+ if (!reg_values) {
+ rc = -ENOMEM;
+ goto copy_addr_failed;
+ }
+
+ for (i = 0 ; i < reg_list->reg_num; ++i) {
+ reg_values[i] = msm_camera_io_r(base + reg_addrs[i]);
+ pr_debug("reg 0x%x 0x%x\n",
+ reg_addrs[i],
+ reg_values[i]);
+ }
+
+ if (copy_to_user(reg_list->value_list, reg_values,
+ sizeof(uint32_t) * reg_list->reg_num)) {
+ rc = -EFAULT;
+ pr_err("%s copy_to_user fail %u\n",
+ __func__,
+ reg_list->reg_num);
+ goto copy_value_failed;
+ }
+
+copy_value_failed:
+ kfree(reg_values);
+
+copy_addr_failed:
+ kfree(reg_addrs);
+
+alloc_addr_failed:
+
+ return rc;
+}
+
+int msm_camera_diag_init(void)
+{
+ s_diag_clk_list.clk_num = 0;
+ s_diag_clk_list.clk_capacity = MAX_CLK_NUM;
+ s_diag_clk_list.clk_infolist = kzalloc(
+ sizeof(struct msm_ais_diag_clk_info_t) *
+ s_diag_clk_list.clk_capacity,
+ GFP_KERNEL);
+
+ if (!s_diag_clk_list.clk_infolist)
+ return -ENOMEM;
+
+ s_diag_clk_list.ppclk = kzalloc(sizeof(struct clk *) *
+ s_diag_clk_list.clk_capacity,
+ GFP_KERNEL);
+ if (!s_diag_clk_list.ppclk) {
+ kfree(s_diag_clk_list.clk_infolist);
+ return -ENOMEM;
+ }
+
+ mutex_init(&s_diag_clk_list.lock);
+ mutex_init(&s_ddrbw.lock);
+ return 0;
+}
+
+int msm_camera_diag_uninit(void)
+{
+ mutex_destroy(&s_ddrbw.lock);
+ mutex_destroy(&s_diag_clk_list.lock);
+
+ kfree(s_diag_clk_list.clk_infolist);
+ s_diag_clk_list.clk_infolist = NULL;
+
+ kfree(s_diag_clk_list.ppclk);
+ s_diag_clk_list.ppclk = NULL;
+
+ return 0;
+}
+
+static uint32_t msm_camera_diag_find_clk_idx(
+ struct msm_cam_clk_info *clk_info,
+ struct clk *clk_ptr)
+{
+ uint32_t i = 0;
+
+ for (; i < s_diag_clk_list.clk_num; ++i) {
+ if (clk_ptr == s_diag_clk_list.ppclk[i])
+ return i;
+ }
+
+ return s_diag_clk_list.clk_capacity;
+}
+
+int msm_camera_diag_update_clklist(
+ struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable)
+{
+ uint32_t i = 0;
+ uint32_t idx = 0;
+ uint32_t actual_idx = 0;
+ struct msm_ais_diag_clk_info_t *pclk_info = NULL;
+
+ mutex_lock(&s_diag_clk_list.lock);
+ for (; i < num_clk; ++i) {
+ idx = msm_camera_diag_find_clk_idx(&clk_info[i], clk_ptr[i]);
+ if (idx < s_diag_clk_list.clk_num) {
+ actual_idx = idx;
+ pclk_info =
+ &s_diag_clk_list.clk_infolist[actual_idx];
+ } else if (s_diag_clk_list.clk_num <
+ s_diag_clk_list.clk_capacity) {
+ actual_idx = s_diag_clk_list.clk_num++;
+ memset(&s_diag_clk_list.clk_infolist[actual_idx],
+ 0,
+ sizeof(struct msm_ais_diag_clk_info_t));
+ pclk_info =
+ &s_diag_clk_list.clk_infolist[actual_idx];
+ memcpy(pclk_info->clk_name,
+ clk_info[i].clk_name,
+ sizeof(pclk_info->clk_name));
+ s_diag_clk_list.ppclk[actual_idx] = clk_ptr[i];
+ pr_debug("%s new clk %s clk_num %u\n",
+ __func__,
+ clk_info[i].clk_name,
+ s_diag_clk_list.clk_num);
+ } else {
+ pr_err("%s too many clks\n", __func__);
+ continue;
+ }
+
+ pclk_info->clk_rate = clk_get_rate(clk_ptr[i]);
+ if (enable) {
+ ++pclk_info->enable;
+ } else {
+ int cnt = pclk_info->enable;
+
+ if (cnt > 0)
+ --pclk_info->enable;
+ }
+ }
+
+ mutex_unlock(&s_diag_clk_list.lock);
+ return 0;
+}
+
+int msm_camera_diag_get_clk_list(
+ struct msm_ais_diag_clk_list_t *clk_infolist)
+{
+ int rc = 0;
+
+ mutex_lock(&s_diag_clk_list.lock);
+ clk_infolist->clk_num = s_diag_clk_list.clk_num;
+ if (copy_to_user(clk_infolist->clk_info,
+ s_diag_clk_list.clk_infolist,
+ sizeof(struct msm_ais_diag_clk_info_t) *
+ s_diag_clk_list.clk_num)) {
+ rc = -EFAULT;
+ }
+ mutex_unlock(&s_diag_clk_list.lock);
+ return rc;
+}
+
+int msm_camera_diag_get_gpio_list(
+ struct msm_ais_diag_gpio_list_t *gpio_list)
+{
+ int rc = 0;
+ uint32_t gpio_num = gpio_list->gpio_num;
+ uint32_t i = 0;
+ int32_t *vals = NULL;
+ uint32_t idxs_size = sizeof(uint32_t) * gpio_num;
+ uint32_t vals_size = sizeof(int32_t) * gpio_num;
+ uint32_t *idxs = kzalloc(idxs_size, GFP_KERNEL);
+
+ if (!idxs) {
+ rc = -ENOMEM;
+ goto alloc_idxs_failed;
+ }
+
+ if (copy_from_user(idxs,
+ (void __user *)(gpio_list->gpio_idx_list),
+ idxs_size)) {
+ rc = -EFAULT;
+ pr_err("%s copy_from_user fail\n", __func__);
+ goto copy_idxs_failed;
+ }
+
+ vals = kzalloc(vals_size, GFP_KERNEL);
+ if (!vals) {
+ rc = -ENOMEM;
+ goto copy_idxs_failed;
+ }
+
+ for (; i < gpio_num; ++i)
+ vals[i] =
+ gpio_get_value(idxs[i]);
+
+ if (copy_to_user(gpio_list->gpio_val_list, vals,
+ vals_size)) {
+ rc = -EFAULT;
+ pr_err("%s copy_to_user fail %u\n",
+ __func__,
+ gpio_num);
+ }
+
+ kfree(vals);
+
+copy_idxs_failed:
+ kfree(idxs);
+
+alloc_idxs_failed:
+ return rc;
+}
+
+int msm_camera_diag_set_gpio_list(
+ struct msm_ais_diag_gpio_list_t *gpio_list)
+{
+ int rc = 0;
+ uint32_t gpio_num = gpio_list->gpio_num;
+ uint32_t i = 0;
+ int32_t val;
+ int32_t *vals = NULL;
+ uint32_t idxs_size = sizeof(uint32_t) * gpio_num;
+ uint32_t vals_size = sizeof(int32_t) * gpio_num;
+ uint32_t *idxs = kzalloc(idxs_size, GFP_KERNEL);
+
+ if (!idxs) {
+ rc = -ENOMEM;
+ goto alloc_idxs_failed;
+ }
+
+ if (copy_from_user(idxs,
+ (void __user *)(gpio_list->gpio_idx_list),
+ idxs_size)) {
+ rc = -EFAULT;
+ pr_err("%s copy_from_user fail\n", __func__);
+ goto copy_idxs_failed;
+ }
+
+ vals = kzalloc(vals_size, GFP_KERNEL);
+ if (!vals) {
+ rc = -ENOMEM;
+ goto copy_idxs_failed;
+ }
+
+ if (copy_from_user(vals,
+ (void __user *)(gpio_list->gpio_val_list),
+ vals_size)) {
+ rc = -EFAULT;
+ pr_err("%s copy_from_user fail\n", __func__);
+ goto copy_vals_failed;
+ }
+
+ for (; i < gpio_num; ++i) {
+ gpio_set_value(idxs[i], vals[i]);
+ val = gpio_get_value(idxs[i]);
+ pr_debug("val set %d after %d\n", vals[i], val);
+ }
+
+copy_vals_failed:
+ kfree(vals);
+
+copy_idxs_failed:
+ kfree(idxs);
+
+alloc_idxs_failed:
+ return rc;
+}
+
+int msm_camera_diag_update_ahb_state(enum cam_ahb_clk_vote vote)
+{
+ mutex_lock(&s_ddrbw.lock);
+ s_ddrbw.bus_info.ahb_clk_vote_state = vote;
+ mutex_unlock(&s_ddrbw.lock);
+ return 0;
+}
+
+int msm_camera_diag_update_isp_state(
+ uint32_t isp_bus_vector_idx,
+ uint64_t isp_ab, uint64_t isp_ib)
+{
+ mutex_lock(&s_ddrbw.lock);
+ s_ddrbw.bus_info.isp_bus_vector_idx = isp_bus_vector_idx;
+ s_ddrbw.bus_info.isp_ab = isp_ab;
+ s_ddrbw.bus_info.isp_ib = isp_ib;
+ mutex_unlock(&s_ddrbw.lock);
+ return 0;
+}
+
+int msm_camera_diag_get_ddrbw(struct msm_ais_diag_bus_info_t *info)
+{
+ int rc = 0;
+
+ mutex_lock(&s_ddrbw.lock);
+ info->ahb_clk_vote_state = s_ddrbw.bus_info.ahb_clk_vote_state;
+ info->isp_bus_vector_idx = s_ddrbw.bus_info.isp_bus_vector_idx;
+ info->isp_ab = s_ddrbw.bus_info.isp_ab;
+ info->isp_ib = s_ddrbw.bus_info.isp_ib;
+
+ mutex_unlock(&s_ddrbw.lock);
+ return rc;
+}
+
+
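The new msm_camera_diag_util.c helpers above share one shape: size the request, allocate kernel bounce buffers, copy_from_user() the caller's index list, do the work, copy_to_user() the results, and unwind through labelled gotos so each early exit frees exactly what was allocated so far. A compact user-space sketch of that allocate/copy/unwind shape follows; read_indexed() and its arguments are hypothetical, and memcpy() merely stands in for the user-copy calls, which in the kernel also report how many bytes could not be transferred.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Read out the values at the requested indices from a source table.
 * Mirrors the bounce-buffer plus goto-unwind shape used above.
 */
static int read_indexed(const uint32_t *table,
			const uint32_t *user_idx, uint32_t *user_val,
			uint32_t num)
{
	uint32_t *idx, *val;
	uint32_t i;
	int rc = 0;

	idx = calloc(num, sizeof(*idx));
	if (!idx)
		return -ENOMEM;

	memcpy(idx, user_idx, num * sizeof(*idx));   /* "copy_from_user" */

	val = calloc(num, sizeof(*val));
	if (!val) {
		rc = -ENOMEM;
		goto free_idx;
	}

	for (i = 0; i < num; i++)
		val[i] = table[idx[i]];

	memcpy(user_val, val, num * sizeof(*val));   /* "copy_to_user"   */

	free(val);
free_idx:
	free(idx);
	return rc;
}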
diff --git a/drivers/media/platform/msm/ais/common/msm_camera_diag_util.h b/drivers/media/platform/msm/ais/common/msm_camera_diag_util.h
new file mode 100644
index 000000000000..1d4b09d726e6
--- /dev/null
+++ b/drivers/media/platform/msm/ais/common/msm_camera_diag_util.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAMERA_DIAG_UTIL_H
+#define __MSM_CAMERA_DIAG_UTIL_H
+
+#include <media/ais/msm_ais_mgr.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/msm-bus.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <soc/qcom/ais.h>
+
+int msm_camera_get_reg_list(void __iomem *base,
+ struct msm_camera_reg_list_cmd *reg_list);
+int msm_camera_diag_init(void);
+int msm_camera_diag_uninit(void);
+
+int msm_camera_diag_update_clklist(struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable);
+int msm_camera_diag_get_clk_list(
+ struct msm_ais_diag_clk_list_t *clk_infolist);
+
+int msm_camera_diag_update_ahb_state(enum cam_ahb_clk_vote vote);
+int msm_camera_diag_update_isp_state(uint32_t isp_bus_vector_idx,
+ uint64_t isp_ab, uint64_t isp_ib);
+int msm_camera_diag_get_ddrbw(struct msm_ais_diag_bus_info_t *info);
+int msm_camera_diag_get_gpio_list(
+ struct msm_ais_diag_gpio_list_t *gpio_list);
+int msm_camera_diag_set_gpio_list(
+ struct msm_ais_diag_gpio_list_t *gpio_list);
+
+#endif
diff --git a/drivers/media/platform/msm/ais/common/msm_camera_io_util.c b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
index a09237f3d5ef..c6c2e0d02b65 100644
--- a/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/ais/common/msm_camera_io_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <soc/qcom/ais.h>
#include <linux/msm-bus.h>
#include "msm_camera_io_util.h"
+#include "msm_camera_diag_util.h"
#define BUFF_SIZE_128 128
@@ -365,6 +366,8 @@ int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
}
}
}
+
+ msm_camera_diag_update_clklist(clk_info, clk_ptr, num_clk, enable);
return rc;
diff --git a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
index 585865b12387..c23fddf6e52f 100644
--- a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1144,6 +1144,43 @@ static void msm_isp_release_all_bufq(
}
}
+static int msm_isp_get_bufq_state(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_vfe_bufq_state *bufq_state)
+{
+ int rc = 0;
+ struct msm_isp_bufq *bufq = NULL;
+ uint32_t i = 0;
+ int32_t *k_bufq_states = NULL;
+ uint32_t size = 0;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_state->handle);
+ if (bufq) {
+ bufq_state->nbufs = bufq->num_bufs;
+ size = bufq->num_bufs*sizeof(int32_t);
+ k_bufq_states = kzalloc(size, GFP_KERNEL);
+ if (!k_bufq_states) {
+ rc = -ENOMEM;
+ goto alloc_states_failed;
+ }
+
+ for (i = 0; i < bufq_state->nbufs; ++i)
+ k_bufq_states[i] = bufq->bufs[i].state;
+
+ if (copy_to_user(bufq_state->buf_state,
+ k_bufq_states,
+ sizeof(int32_t) * bufq->num_bufs)) {
+ rc = -EFAULT;
+ pr_err("%s copy_to_user fail\n", __func__);
+ goto copy_failed;
+ }
+
+copy_failed:
+ kfree(k_bufq_states);
+ }
+
+alloc_states_failed:
+ return rc;
+}
/**
* msm_isp_buf_put_scratch() - Release scratch buffers
@@ -1357,6 +1394,14 @@ int msm_isp_proc_buf_cmd(struct msm_isp_buf_mgr *buf_mgr,
rc = buf_mgr->ops->unmap_buf(buf_mgr, unmap_req->fd);
break;
}
+ case VIDIOC_MSM_ISP_CMD_EXT: {
+ struct msm_vfe_cmd_ext *cmd_ext = (struct msm_vfe_cmd_ext *)arg;
+
+ if (cmd_ext->type == VFE_GET_BUFQ_STATE)
+ rc = buf_mgr->ops->get_bufq_state(buf_mgr,
+ &cmd_ext->data.bufq_state);
+ break;
+ }
}
return rc;
}
@@ -1507,6 +1552,7 @@ static struct msm_isp_buf_ops isp_buf_ops = {
.buf_mgr_debug = msm_isp_buf_mgr_debug,
.get_bufq = msm_isp_get_bufq,
.update_put_buf_cnt = msm_isp_update_put_buf_cnt,
+ .get_bufq_state = msm_isp_get_bufq_state,
};
int msm_isp_create_isp_buf_mgr(
diff --git a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.h b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.h
index 4794771d3213..d9a3661306e3 100644
--- a/drivers/media/platform/msm/ais/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/ais/isp/msm_buf_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,16 +44,6 @@ enum msm_isp_buffer_src_t {
MSM_ISP_BUFFER_SRC_MAX,
};
-enum msm_isp_buffer_state {
- MSM_ISP_BUFFER_STATE_UNUSED, /* not used */
- MSM_ISP_BUFFER_STATE_INITIALIZED, /* REQBUF done */
- MSM_ISP_BUFFER_STATE_PREPARED, /* BUF mapped */
- MSM_ISP_BUFFER_STATE_QUEUED, /* buf queued */
- MSM_ISP_BUFFER_STATE_DEQUEUED, /* in use in VFE */
- MSM_ISP_BUFFER_STATE_DIVERTED, /* Sent to other hardware*/
- MSM_ISP_BUFFER_STATE_DISPATCHED, /* Sent to HAL*/
-};
-
enum msm_isp_buffer_put_state {
MSM_ISP_BUFFER_STATE_PUT_PREPARED, /* on init */
MSM_ISP_BUFFER_STATE_PUT_BUF, /* on rotation */
@@ -182,6 +172,9 @@ struct msm_isp_buf_ops {
int (*update_put_buf_cnt)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t id, uint32_t bufq_handle, int32_t buf_index,
struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit);
+
+ int (*get_bufq_state)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_vfe_bufq_state *bufq_state);
};
struct msm_isp_buf_mgr {
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h
index 419615cc9b4a..856bf55f8c29 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp.h
@@ -782,6 +782,7 @@ struct vfe_device {
uint32_t is_split;
uint32_t dual_vfe_enable;
unsigned long page_fault_addr;
+ bool clk_enabled;
/* Debug variables */
int dump_reg;
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c
index 6ca91b4fcf83..1ddcab3ed331 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c
@@ -22,6 +22,7 @@
#include "cam_hw_ops.h"
#include "msm_isp47.h"
#include "cam_soc_api.h"
+#include "msm_camera_diag_util.h"
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -1038,16 +1039,18 @@ int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
vfe_dev->buf_mgr, fe_cfg->session_id,
fe_cfg->stream_id);
vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
-
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
@@ -1100,14 +1103,15 @@ int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
@@ -2393,6 +2397,8 @@ int msm_vfe47_update_bandwidth(
ab, ib,
isp_bandwidth_mgr->client_info,
sched_clock());
+ msm_camera_diag_update_isp_state(
+ isp_bandwidth_mgr->bus_vector_active_idx, ab, ib);
return 0;
}
@@ -2423,9 +2429,30 @@ void msm_vfe47_put_clks(struct vfe_device *vfe_dev)
int msm_vfe47_enable_clks(struct vfe_device *vfe_dev, int enable)
{
- return msm_camera_clk_enable(&vfe_dev->pdev->dev,
+ unsigned long flags;
+ int rc;
+
+ if (!enable) {
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ vfe_dev->clk_enabled = false;
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ }
+
+ rc = msm_camera_clk_enable(&vfe_dev->pdev->dev,
vfe_dev->vfe_clk_info,
vfe_dev->vfe_clk, vfe_dev->num_clk, enable);
+ if (rc < 0) {
+ pr_err("%s: clk set %d failed %d\n", __func__, enable, rc);
+ return rc;
+ }
+
+ if (enable) {
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ vfe_dev->clk_enabled = true;
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ }
+
+ return rc;
}
int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
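In msm_isp47.c above, clk_enabled is cleared under tasklet_lock before the clocks are gated and set again only after they are back on; further down, msm_isp_do_tasklet() re-checks the flag under the same lock, so a tasklet that was already queued when the client closed bails out instead of touching a clock-gated block. A minimal sketch of that publish-before-disable ordering, with a pthread mutex standing in for the spinlock and made-up names:

#include <pthread.h>
#include <stdbool.h>

struct device_state {
	pthread_mutex_t lock;   /* stands in for vfe_dev->tasklet_lock */
	bool clk_enabled;       /* may deferred work touch the HW?     */
};

/* Disable path: publish "clocks going away" BEFORE gating them, so a
 * deferred handler that runs afterwards sees the flag and returns.
 */
static void clocks_off(struct device_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->clk_enabled = false;
	pthread_mutex_unlock(&s->lock);
	/* ... actually gate the clocks here ... */
}

/* Deferred handler (the tasklet in the driver): re-check the flag
 * under the same lock and exit early instead of touching dead hardware.
 */
static void deferred_irq_work(struct device_state *s)
{
	pthread_mutex_lock(&s->lock);
	if (!s->clk_enabled) {
		pthread_mutex_unlock(&s->lock);
		return;
	}
	/* ... consume the latched IRQ status and process it ... */
	pthread_mutex_unlock(&s->lock);
}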
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
index c0a36843d7ff..f135cfcd6ccd 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.c
@@ -1956,7 +1956,8 @@ static void msm_isp_handle_done_buf_frame_id_mismatch(
static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
- struct timeval *time_stamp, uint32_t frame_id)
+ struct timeval *time_stamp, struct timeval *time_stamp_system,
+ uint32_t frame_id)
{
int rc;
unsigned long flags;
@@ -2037,7 +2038,13 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
}
buf_event.frame_id = frame_id;
+ /* timestamp stores monotonic time */
buf_event.timestamp = *time_stamp;
+ /* buf_event.mono_timestamp is otherwise unused on this path;
+ * reuse it to carry the system time up to userspace alongside
+ * the monotonic timestamp.
+ */
+ buf_event.mono_timestamp = *time_stamp_system;
buf_event.u.buf_done.session_id = stream_info->session_id;
buf_event.u.buf_done.stream_id = stream_info->stream_id;
buf_event.u.buf_done.handle = buf->bufq_handle;
@@ -2809,9 +2816,11 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
vfe_dev->axi_data.src_info[VFE_PIX_0].eof_id = 0;
}
+ mutex_lock(&vfe_dev->buf_mgr->lock);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
VFE_AXI_SRC_MAX) {
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
stream_info = &axi_data->stream_info[
@@ -2821,6 +2830,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
SRC_TO_INTF(stream_info->stream_src)].active;
else {
ISP_DBG("%s: invalid src info index\n", __func__);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
@@ -2828,13 +2838,13 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_reset_framedrop(vfe_dev, stream_info);
rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
if (rc < 0) {
pr_err("%s: No buffer for stream%d\n", __func__,
HANDLE_TO_IDX(
stream_cfg_cmd->stream_handle[i]));
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return rc;
}
@@ -2893,6 +2903,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
}
}
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
msm_isp_update_stream_bandwidth(vfe_dev, stream_cfg_cmd->hw_state);
vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
vfe_dev->vfe_base, wm_reload_mask);
@@ -3574,6 +3585,11 @@ int msm_isp_axi_output_cfg(struct vfe_device *vfe_dev, void *arg)
pstream_info, plane_idx);
}
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->vfe_base, pstream_info,
+ pCmd->output_path_cfg[axi_src_idx].framedrop_pattern,
+ pCmd->output_path_cfg[axi_src_idx].framedrop_period);
+
if (axi_src_idx <= PIX_ENCODER && axi_src_idx <= IDEAL_RAW) {
if (axi_src_idx == CAMIF_RAW) {
vfe_dev->axi_data.src_info[VFE_PIX_0].
@@ -3614,6 +3630,29 @@ int msm_isp_axi_output_cfg(struct vfe_device *vfe_dev, void *arg)
return rc;
}
+void msm_isp_framedrop_update(struct vfe_device *vfe_dev, void *arg)
+{
+ struct msm_vfe_axi_framedrop_update *pCmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *pstream_info;
+
+ pr_debug("%s: entry\n", __func__);
+
+ if (pCmd->stream_src < VFE_AXI_SRC_MAX) {
+
+ pstream_info = &axi_data->stream_info[pCmd->stream_src];
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->vfe_base, pstream_info,
+ pCmd->framedrop_pattern,
+ pCmd->framedrop_period);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, SRC_TO_INTF(pstream_info->stream_src));
+ }
+
+ pr_debug("%s: exit\n", __func__);
+}
int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
@@ -3822,10 +3861,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
&update_cmd->update_info[i];
stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
update_info->stream_handle)];
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_request_frame(vfe_dev, stream_info,
update_info->user_stream_id,
update_info->frame_id,
MSM_ISP_INVALID_BUF_INDEX);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
@@ -3898,10 +3939,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
stream_info = &axi_data->stream_info[HANDLE_TO_IDX(
req_frm->stream_handle)];
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_request_frame(vfe_dev, stream_info,
req_frm->user_stream_id,
req_frm->frame_id,
req_frm->buf_index);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
@@ -3925,6 +3968,7 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
struct msm_isp_buffer *done_buf = NULL;
unsigned long flags;
struct timeval *time_stamp;
+ struct timeval *time_stamp_system;
uint32_t frame_id, buf_index = -1;
struct msm_vfe_axi_stream *temp_stream;
@@ -3938,6 +3982,8 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
time_stamp = &ts->vt_time;
} else {
time_stamp = &ts->buf_time;
+ /* store system time */
+ time_stamp_system = &ts->event_time;
}
frame_id = vfe_dev->axi_data.
@@ -4080,7 +4126,7 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
}
msm_isp_process_done_buf(vfe_dev, stream_info,
- done_buf, time_stamp, frame_id);
+ done_buf, time_stamp, time_stamp_system, frame_id);
}
void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
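Several hunks above (the fetch-engine start paths, start_axi_stream, the request-frame cases) widen the scope of buf_mgr->lock so that looking a buffer up and marking it dispatched happen in one critical section, and every error return added inside that section drops the lock first. The shape, reduced to a sketch with invented types and a plain mutex:

#include <pthread.h>
#include <errno.h>

struct buffer {
	int state;
};

struct buf_mgr {
	pthread_mutex_t lock;
	struct buffer *bufs;
	unsigned int num_bufs;
};

/* Look the buffer up and mark it dispatched under one critical
 * section, so it cannot be freed or re-queued in between; note that
 * the error path unlocks before returning, as in the hunks above.
 */
static int claim_buffer(struct buf_mgr *mgr, unsigned int idx)
{
	struct buffer *buf;

	pthread_mutex_lock(&mgr->lock);
	if (idx >= mgr->num_bufs) {
		pthread_mutex_unlock(&mgr->lock);
		return -EINVAL;
	}
	buf = &mgr->bufs[idx];
	buf->state = 1;                 /* "dispatched" */
	pthread_mutex_unlock(&mgr->lock);
	return 0;
}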
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
index 7babd750a05a..d695c4c0edf3 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_axi_util.h
@@ -84,6 +84,7 @@ int msm_isp_axi_restart(struct vfe_device *vfe_dev,
int msm_isp_axi_output_cfg(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_framedrop_update(struct vfe_device *vfe_dev, void *arg);
void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
enum msm_vfe_input_src frame_src);
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
index 0d08cffda25c..360eb8eca8d7 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -684,18 +684,23 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
stream_cfg_cmd->num_streams);
return -EINVAL;
}
+ mutex_lock(&vfe_dev->buf_mgr->lock);
+
num_stats_comp_mask =
vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
stats_data->stream_info);
- if (rc < 0)
+ if (rc < 0) {
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return rc;
+ }
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s Invalid stats index %d", __func__, idx);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
@@ -711,11 +716,13 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
pr_err("%s: comp grp %d exceed max %d\n",
__func__, stream_info->composite_flag,
num_stats_comp_mask);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
if (rc < 0) {
pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return rc;
}
if (!stream_info->composite_flag)
@@ -740,6 +747,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
stats_data->num_active_stream);
}
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
index 5ca3b8d531a2..a9b6e5e6a861 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp_util.c
@@ -392,9 +392,10 @@ static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
0, 1);
msm_isp_reset_framedrop(vfe_dev, stream_info);
-
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
VFE_PING_FLAG, fe_cfg->output_buf_idx);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0) {
pr_err("%s: Fetch engine config failed\n", __func__);
return -EINVAL;
@@ -846,11 +847,30 @@ static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
}
#endif /* CONFIG_COMPAT */
+static int process_isp_cmd_ext(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_cmd_ext *cmd = (struct msm_vfe_cmd_ext *)arg;
+
+ switch (cmd->type) {
+ case VFE_GET_BUFQ_STATE: {
+ mutex_lock(&vfe_dev->buf_mgr->lock);
+ rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr,
+ VIDIOC_MSM_ISP_CMD_EXT, arg);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
+ break;
+ }
+ }
+
+ return rc;
+}
+
static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
long rc = 0;
long rc2 = 0;
+ unsigned long flags;
struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
if (!vfe_dev || !vfe_dev->vfe_base) {
@@ -947,6 +967,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
case VIDIOC_MSM_ISP_AXI_RESTART:
mutex_lock(&vfe_dev->core_mutex);
+ mutex_lock(&vfe_dev->buf_mgr->lock);
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_restart(vfe_dev);
@@ -957,6 +978,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
__func__);
}
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_INPUT_CFG:
@@ -979,12 +1001,14 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_FETCH_ENG_START:
- case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
mutex_lock(&vfe_dev->core_mutex);
rc = msm_isp_start_fetch_engine(vfe_dev, arg);
mutex_unlock(&vfe_dev->core_mutex);
break;
+ case VIDIOC_MSM_ISP_CMD_EXT:
+ process_isp_cmd_ext(vfe_dev, arg);
+ break;
case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
mutex_lock(&vfe_dev->core_mutex);
@@ -1049,6 +1073,11 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
rc = msm_isp_camif_cfg(vfe_dev, arg);
mutex_unlock(&vfe_dev->core_mutex);
break;
+ case VIDIOC_MSM_ISP_FRAMEDROP_UPDATE:
+ mutex_lock(&vfe_dev->core_mutex);
+ msm_isp_framedrop_update(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
case MSM_SD_NOTIFY_FREEZE:
vfe_dev->isp_sof_debug = 0;
vfe_dev->isp_raw0_debug = 0;
@@ -1061,6 +1090,11 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
while (vfe_dev->vfe_open_cnt != 0)
msm_isp_close_node(sd, NULL);
break;
+ case VIDIOC_MSM_ISP_SET_CLK_STATUS:
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ vfe_dev->clk_enabled = *((unsigned int *)arg);
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ break;
default:
pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
@@ -1999,7 +2033,7 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
MSM_VFE_TASKLETQ_SIZE;
list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
- tasklet_schedule(&vfe_dev->vfe_tasklet);
+ tasklet_hi_schedule(&vfe_dev->vfe_tasklet);
}
irqreturn_t msm_isp_process_irq(int irq_num, void *data)
@@ -2114,10 +2148,20 @@ void msm_isp_do_tasklet(unsigned long data)
atomic_sub(1, &vfe_dev->irq_cnt);
list_del(&queue_cmd->list);
queue_cmd->cmd_used = 0;
+
+ if (!vfe_dev->clk_enabled) {
+ /* client closed, delayed task should exit directly */
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ return;
+ }
+
irq_status0 = queue_cmd->vfeInterruptStatus0;
irq_status1 = queue_cmd->vfeInterruptStatus1;
pingpong_status = queue_cmd->vfePingPongStatus;
ts = queue_cmd->ts;
+ /* halt-IRQ handling reads/writes registers, keep it under the lock */
+ irq_ops->process_halt_irq(vfe_dev,
+ irq_status0, irq_status1);
spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
ISP_DBG("%s: vfe_id %d status0: 0x%x status1: 0x%x\n",
__func__, vfe_dev->pdev->id, irq_status0, irq_status1);
@@ -2141,8 +2185,6 @@ void msm_isp_do_tasklet(unsigned long data)
}
irq_ops->process_reset_irq(vfe_dev,
irq_status0, irq_status1);
- irq_ops->process_halt_irq(vfe_dev,
- irq_status0, irq_status1);
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= NO_OVERFLOW) {
ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
@@ -2333,16 +2375,6 @@ int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&vfe_dev->realtime_mutex);
mutex_lock(&vfe_dev->core_mutex);
- /* Enable vfe clks to wake up from XO shutdown mode */
- if (vfe_dev->pdev->id == 0)
- id = CAM_AHB_CLIENT_VFE0;
- else
- id = CAM_AHB_CLIENT_VFE1;
- if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE) < 0)
- pr_err("%s: failed to vote for AHB\n", __func__);
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 1);
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 1);
-
if (!vfe_dev->vfe_open_cnt) {
pr_err("%s invalid state open cnt %d\n", __func__,
vfe_dev->vfe_open_cnt);
@@ -2357,6 +2389,17 @@ int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&vfe_dev->realtime_mutex);
return 0;
}
+
+ /* Enable vfe clks to wake up from XO shutdown mode */
+ if (vfe_dev->pdev->id == 0)
+ id = CAM_AHB_CLIENT_VFE0;
+ else
+ id = CAM_AHB_CLIENT_VFE1;
+ if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE) < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 1);
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 1);
+
/* Unregister page fault handler */
cam_smmu_reg_client_page_fault_handler(
vfe_dev->buf_mgr->iommu_hdl,
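The msm_isp_close_node() hunk above moves the AHB vote and the clock/regulator re-enable to after the vfe_open_cnt checks, so the early-return paths (unbalanced close, or other users still open) no longer power the block up on their way out. Reduced to a sketch with invented names and stub power helpers, the intended ordering looks roughly like this:

#include <errno.h>

struct node {
	int open_cnt;
	int powered;
};

static void power_up(struct node *n)   { n->powered = 1; }
static void power_down(struct node *n) { n->powered = 0; }

/* Check the reference count first; only the last closer pays for the
 * wake-up (vote + clocks) needed to tear the hardware down, so the
 * early returns never leave the block powered.
 */
static int close_node(struct node *n)
{
	if (n->open_cnt == 0)
		return -EINVAL;          /* unbalanced close: do nothing */
	if (--n->open_cnt > 0)
		return 0;                /* other users still open       */

	power_up(n);                     /* wake from low-power state    */
	/* ... unregister fault handler, halt hardware, release bufs ... */
	power_down(n);
	return 0;
}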
diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif.c b/drivers/media/platform/msm/ais/ispif/msm_ispif.c
index a72ac566bb8c..5ddf554d6ef3 100644
--- a/drivers/media/platform/msm/ais/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/ais/ispif/msm_ispif.c
@@ -29,6 +29,7 @@
#include "msm_camera_io_util.h"
#include "cam_hw_ops.h"
#include "cam_soc_api.h"
+#include "msm_camera_diag_util.h"
#ifdef CONFIG_AIS_MSM_ISPIF_V1
#include "msm_ispif_hwreg_v1.h"
@@ -1526,6 +1527,22 @@ static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
case ISPIF_SET_VFE_INFO:
rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info);
break;
+ case ISPIF_READ_REG_LIST_CMD:
+ {
+ struct msm_camera_reg_list_cmd reg_list_cmd;
+
+ if (copy_from_user(&reg_list_cmd,
+ (void __user *)pcdata->reg_list,
+ sizeof(struct msm_camera_reg_list_cmd))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ rc = msm_camera_get_reg_list(ispif->base, &reg_list_cmd);
+ break;
+ }
+ case ISPIF_WRITE_REG_LIST_CMD:
+ break;
default:
pr_err("%s: invalid cfg_type\n", __func__);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c
index 76fe7dfa68cb..61200d379a1d 100644
--- a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c
+++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -754,9 +754,12 @@ static int msm_jpegdma_s_fmt_vid_out(struct file *file,
static int msm_jpegdma_reqbufs(struct file *file,
void *fh, struct v4l2_requestbuffers *req)
{
+ int ret = 0;
struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req);
+ mutex_lock(&ctx->lock);
+ ret = v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req);
+ mutex_unlock(&ctx->lock);
+ return ret;
}
/*
@@ -833,11 +836,11 @@ static int msm_jpegdma_streamoff(struct file *file,
{
struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
int ret;
-
+ mutex_lock(&ctx->lock);
ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, buf_type);
if (ret < 0)
dev_err(ctx->jdma_device->dev, "Stream off fails\n");
-
+ mutex_unlock(&ctx->lock);
return ret;
}
diff --git a/drivers/media/platform/msm/ais/msm.c b/drivers/media/platform/msm/ais/msm.c
index 902e05b3329b..c3f3542cc87a 100644
--- a/drivers/media/platform/msm/ais/msm.c
+++ b/drivers/media/platform/msm/ais/msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
#include "msm_sd.h"
#include "cam_hw_ops.h"
#include <media/ais/msm_ais_buf_mgr.h>
+#include "msm_camera_diag_util.h"
static struct v4l2_device *msm_v4l2_dev;
@@ -1384,6 +1385,12 @@ static int msm_probe(struct platform_device *pdev)
goto v4l2_fail;
}
+ rc = msm_camera_diag_init();
+ if (rc < 0) {
+ pr_err("%s: failed to init diag clk list\n", __func__);
+ goto v4l2_fail;
+ }
+
goto probe_end;
v4l2_fail:
@@ -1428,6 +1435,7 @@ static int __init msm_init(void)
static void __exit msm_exit(void)
{
+ msm_camera_diag_uninit();
platform_driver_unregister(&msm_driver);
}
diff --git a/drivers/media/platform/msm/ais/msm_ais_diag/Makefile b/drivers/media/platform/msm/ais/msm_ais_diag/Makefile
new file mode 100644
index 000000000000..7c40ea02b70a
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_ais_diag/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/ais
+ccflags-y += -Idrivers/media/platform/msm/ais/common
+ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
+obj-$(CONFIG_MSM_AIS) += msm_diag_cam.o
diff --git a/drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.c b/drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.c
new file mode 100644
index 000000000000..c2933d79babc
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.c
@@ -0,0 +1,267 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
+#include "msm_sd.h"
+#include "msm_diag_cam.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+#include "cam_hw_ops.h"
+#include "msm_camera_diag_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#undef DIAG_CAM_DBG
+#ifdef MSM_DIAG_CAM_DEBUG
+#define DIAG_CAM_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define DIAG_CAM_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define MSM_DIAG_CAM_DRV_NAME "msm_diag_cam"
+static struct platform_driver msm_diag_camera_driver;
+static struct diag_cam_device *new_diag_cam_dev;
+
+int msm_ais_enable_allclocks(void)
+{
+ int rc = 0;
+
+ CDBG("%s:\n", __func__);
+ /* Vote ON for clocks */
+ if (new_diag_cam_dev == NULL) {
+ rc = -EINVAL;
+ pr_err("%s: clock structure uninitialised %d\n", __func__,
+ rc);
+ return rc;
+ }
+
+ rc = msm_camera_enable_vreg(&new_diag_cam_dev->pdev->dev,
+ new_diag_cam_dev->diag_cam_vreg,
+ new_diag_cam_dev->regulator_count,
+ NULL,
+ 0,
+ &new_diag_cam_dev->diag_cam_reg_ptr[0], 1);
+ if (rc < 0)
+ pr_err("%s:%d diag_cam enable_vreg failed\n", __func__,
+ __LINE__);
+
+ rc = msm_camera_clk_enable(&new_diag_cam_dev->pdev->dev,
+ new_diag_cam_dev->diag_cam_clk_info,
+ new_diag_cam_dev->diag_cam_clk,
+ new_diag_cam_dev->num_clk, true);
+
+ if (rc < 0) {
+ pr_err("%s: clk enable failed %d\n", __func__, rc);
+ rc = 0;
+ return rc;
+ }
+ pr_debug("Turned ON camera clocks\n");
+ return 0;
+
+}
+
+int msm_ais_disable_allclocks(void)
+{
+ int rc = 0;
+
+ CDBG("%s:\n", __func__);
+ /* Vote OFF for clocks */
+ if (new_diag_cam_dev == NULL) {
+ rc = -EINVAL;
+ pr_err("%s: clock structure uninitialised %d\n", __func__,
+ rc);
+ return rc;
+ }
+
+ if ((new_diag_cam_dev->pdev == NULL) ||
+ (new_diag_cam_dev->diag_cam_clk_info == NULL) ||
+ (new_diag_cam_dev->diag_cam_clk == NULL) ||
+ (new_diag_cam_dev->num_clk == 0)) {
+ rc = -EINVAL;
+ pr_err("%s: Clock details uninitialised %d\n", __func__,
+ rc);
+ return rc;
+ }
+
+ rc = msm_camera_clk_enable(&new_diag_cam_dev->pdev->dev,
+ new_diag_cam_dev->diag_cam_clk_info,
+ new_diag_cam_dev->diag_cam_clk,
+ new_diag_cam_dev->num_clk, false);
+ if (rc < 0) {
+ pr_err("%s: clk disable failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ rc = msm_camera_enable_vreg(&new_diag_cam_dev->pdev->dev,
+ new_diag_cam_dev->diag_cam_vreg,
+ new_diag_cam_dev->regulator_count,
+ NULL,
+ 0,
+ &new_diag_cam_dev->diag_cam_reg_ptr[0], 0);
+ if (rc < 0)
+ pr_err("%s:%d diag_cam disable_vreg failed\n", __func__,
+ __LINE__);
+
+ pr_debug("Turned OFF camera clocks\n");
+ return 0;
+}
+
+int msm_diag_camera_get_vreginfo_list(
+ struct msm_ais_diag_regulator_info_list_t *p_vreglist)
+{
+ int rc = 0;
+ uint32_t i = 0;
+ uint32_t len = 0;
+ uint32_t len1 = 0;
+ struct regulator *vreg = NULL;
+ char *vreg_name_inuser = NULL;
+
+ p_vreglist->regulator_num = new_diag_cam_dev->regulator_count;
+
+ pr_debug("ais diag regulator_count %u\n",
+ new_diag_cam_dev->regulator_count);
+
+ for (; i < p_vreglist->regulator_num ; ++i) {
+ vreg = new_diag_cam_dev->diag_cam_reg_ptr[i];
+ p_vreglist->infolist[i].enable =
+ regulator_is_enabled(vreg);
+ len = strlen(new_diag_cam_dev->diag_cam_vreg[i].reg_name);
+ len1 = sizeof(p_vreglist->infolist[i].regulatorname);
+ len = (len >= len1) ? len1 : (len+1);
+ vreg_name_inuser =
+ p_vreglist->infolist[i].regulatorname;
+ if (copy_to_user((void __user *)vreg_name_inuser,
+ (void *)new_diag_cam_dev->diag_cam_vreg[i].reg_name,
+ len)) {
+ rc = -EFAULT;
+ pr_err("%s copy_to_user fail\n", __func__);
+ break;
+ }
+ }
+
+ pr_debug("msm_diag_camera_get_vreginfo_list exit\n");
+ return rc;
+}
+
+static int msm_diag_cam_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ CDBG("%s: pdev %pK device id = %d\n", __func__, pdev, pdev->id);
+
+ new_diag_cam_dev = kzalloc(sizeof(struct diag_cam_device),
+ GFP_KERNEL);
+ if (!new_diag_cam_dev)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
+ rc = msm_camera_get_clk_info(pdev,
+ &new_diag_cam_dev->diag_cam_clk_info,
+ &new_diag_cam_dev->diag_cam_clk,
+ &new_diag_cam_dev->num_clk);
+ if (rc < 0) {
+ pr_err("%s: msm_diag_cam_get_clk_info() failed", __func__);
+ kfree(new_diag_cam_dev);
+ return -EFAULT;
+ }
+
+ new_diag_cam_dev->ref_count = 0;
+ new_diag_cam_dev->pdev = pdev;
+
+ rc = msm_camera_get_dt_vreg_data(
+ new_diag_cam_dev->pdev->dev.of_node,
+ &(new_diag_cam_dev->diag_cam_vreg),
+ &(new_diag_cam_dev->regulator_count));
+ if (rc < 0) {
+ pr_err("%s: msm_camera_get_dt_vreg_data fail\n", __func__);
+ rc = -EFAULT;
+ goto diag_cam_release_mem;
+ }
+
+ if ((new_diag_cam_dev->regulator_count < 0) ||
+ (new_diag_cam_dev->regulator_count > MAX_REGULATOR)) {
+ pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
+ new_diag_cam_dev->regulator_count, MAX_REGULATOR);
+ rc = -EFAULT;
+ goto diag_cam_invalid_vreg_data;
+ }
+
+ rc = msm_camera_config_vreg(&new_diag_cam_dev->pdev->dev,
+ new_diag_cam_dev->diag_cam_vreg,
+ new_diag_cam_dev->regulator_count,
+ NULL,
+ 0,
+ &new_diag_cam_dev->diag_cam_reg_ptr[0], 1);
+ if (rc < 0)
+ pr_err("%s:%d diag_cam config_vreg failed\n", __func__,
+ __LINE__);
+
+ platform_set_drvdata(pdev, new_diag_cam_dev);
+
+ return 0;
+
+diag_cam_invalid_vreg_data:
+ kfree(new_diag_cam_dev->diag_cam_vreg);
+diag_cam_release_mem:
+ kfree(new_diag_cam_dev);
+ new_diag_cam_dev = NULL;
+ return rc;
+}
+
+static int msm_diag_cam_exit(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __init msm_diag_cam_init_module(void)
+{
+ return platform_driver_register(&msm_diag_camera_driver);
+}
+
+static void __exit msm_diag_cam_exit_module(void)
+{
+ kfree(new_diag_cam_dev);
+ platform_driver_unregister(&msm_diag_camera_driver);
+}
+
+static const struct of_device_id msm_diag_camera_match_table[] = {
+ { .compatible = "qcom,diag-cam" },
+ {},
+};
+
+static struct platform_driver msm_diag_camera_driver = {
+ .probe = msm_diag_cam_probe,
+ .remove = msm_diag_cam_exit,
+ .driver = {
+ .name = MSM_DIAG_CAM_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_diag_camera_match_table,
+ },
+};
+
+MODULE_DEVICE_TABLE(of, msm_diag_camera_match_table);
+
+module_init(msm_diag_cam_init_module);
+module_exit(msm_diag_cam_exit_module);
+MODULE_DESCRIPTION("MSM diag camera driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.h b/drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.h
new file mode 100644
index 000000000000..572ba8dfba3a
--- /dev/null
+++ b/drivers/media/platform/msm/ais/msm_ais_diag/msm_diag_cam.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_DIAG_CAM_H
+#define MSM_DIAG_CAM_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <linux/workqueue.h>
+#include <media/ais/msm_ais_sensor.h>
+#include <soc/qcom/ais.h>
+#include <media/ais/msm_ais.h>
+#include "msm_sd.h"
+#include "cam_soc_api.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+
+enum msm_diag_cam_state_t {
+ AIS_DIAG_STATE_DISABLED,
+ AIS_DIAG_STATE_ENABLED,
+};
+
+struct diag_cam_device {
+ struct platform_device *pdev;
+ uint8_t ref_count;
+ enum msm_diag_cam_state_t diag_cam_state;
+ size_t num_clk;
+ size_t num_clk_cases;
+ struct clk **diag_cam_clk;
+ uint32_t **diag_cam_clk_rates;
+ struct msm_cam_clk_info *diag_cam_clk_info;
+ struct camera_vreg_t *diag_cam_vreg;
+ struct regulator *diag_cam_reg_ptr[MAX_REGULATOR];
+ int32_t regulator_count;
+};
+
+int msm_ais_enable_allclocks(void);
+int msm_ais_disable_allclocks(void);
+int msm_diag_camera_get_vreginfo_list(
+ struct msm_ais_diag_regulator_info_list_t *p_vreglist);
+#endif
diff --git a/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile b/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile
index b7a078738489..bb14aec1ee29 100644
--- a/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile
+++ b/drivers/media/platform/msm/ais/msm_ais_mgr/Makefile
@@ -2,4 +2,5 @@ ccflags-y += -Idrivers/media/platform/msm/ais
ccflags-y += -Idrivers/media/platform/msm/ais/common
ccflags-y += -Idrivers/media/platform/msm/ais/sensor/io
ccflags-y += -Idrivers/media/platform/msm/ais/sensor/cci
+ccflags-y += -Idrivers/media/platform/msm/ais/msm_ais_diag
obj-$(CONFIG_MSM_AIS) += msm_ais_mgr.o
diff --git a/drivers/media/platform/msm/ais/msm_ais_mgr/msm_ais_mgr.c b/drivers/media/platform/msm/ais/msm_ais_mgr/msm_ais_mgr.c
index 9391c1d0d4ab..4ae07932f5da 100644
--- a/drivers/media/platform/msm/ais/msm_ais_mgr/msm_ais_mgr.c
+++ b/drivers/media/platform/msm/ais/msm_ais_mgr/msm_ais_mgr.c
@@ -15,6 +15,8 @@
#include <media/ais/msm_ais_mgr.h>
#include "msm_ais_mngr.h"
#include "msm_early_cam.h"
+#include "msm_camera_diag_util.h"
+#include "msm_diag_cam.h"
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -42,7 +44,56 @@ static long msm_ais_hndl_ioctl(struct v4l2_subdev *sd, void *arg)
case AIS_CLK_DISABLE:
rc = msm_ais_disable_clocks();
break;
+ case AIS_CLK_ENABLE_ALLCLK:
+ rc = msm_ais_enable_allclocks();
+ break;
+ case AIS_CLK_DISABLE_ALLCLK:
+ rc = msm_ais_disable_allclocks();
+ break;
+ default:
+ pr_err("invalid cfg_type\n");
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ pr_err("msm_ais_hndl_ioctl failed %ld\n", rc);
+
+ mutex_unlock(&clk_mngr_dev->cont_mutex);
+ return rc;
+}
+
+static long msm_ais_hndl_ext_ioctl(struct v4l2_subdev *sd, void *arg)
+{
+ long rc = 0;
+ struct clk_mgr_cfg_data_ext *pcdata =
+ (struct clk_mgr_cfg_data_ext *)arg;
+ struct msm_ais_mngr_device *clk_mngr_dev =
+ (struct msm_ais_mngr_device *)v4l2_get_subdevdata(sd);
+ if (WARN_ON(!clk_mngr_dev) || WARN_ON(!pcdata)) {
+ rc = -EINVAL;
+ return rc;
+ }
+
+ mutex_lock(&clk_mngr_dev->cont_mutex);
+ CDBG(pr_fmt("cfg_type = %d\n"), pcdata->cfg_type);
+ switch (pcdata->cfg_type) {
+ case AIS_DIAG_GET_REGULATOR_INFO_LIST:
+ rc = msm_diag_camera_get_vreginfo_list(
+ &pcdata->data.vreg_infolist);
+ break;
+ case AIS_DIAG_GET_BUS_INFO_STATE:
+ rc = msm_camera_diag_get_ddrbw(&pcdata->data.bus_info);
+ break;
+ case AIS_DIAG_GET_CLK_INFO_LIST:
+ rc = msm_camera_diag_get_clk_list(&pcdata->data.clk_infolist);
+ break;
+ case AIS_DIAG_GET_GPIO_LIST:
+ rc = msm_camera_diag_get_gpio_list(&pcdata->data.gpio_list);
+ break;
+ case AIS_DIAG_SET_GPIO_LIST:
+ rc = msm_camera_diag_set_gpio_list(&pcdata->data.gpio_list);
+ break;
default:
pr_err("invalid cfg_type\n");
rc = -EINVAL;
@@ -63,6 +114,11 @@ static long msm_ais_mngr_subdev_ioctl(struct v4l2_subdev *sd,
if (rc)
pr_err("msm_ais_mngr_subdev_ioctl failed\n");
break;
+ case VIDIOC_MSM_AIS_CLK_CFG_EXT:
+ rc = msm_ais_hndl_ext_ioctl(sd, arg);
+ if (rc)
+ pr_err("msm_ais_hndl_ext_ioctl failed\n");
+ break;
default:
rc = -ENOIOCTLCMD;
}
@@ -136,6 +192,7 @@ static int32_t __init msm_ais_mngr_init(void)
static void __exit msm_ais_mngr_exit(void)
{
+
msm_sd_unregister(&msm_ais_mngr_dev->subdev);
mutex_destroy(&msm_ais_mngr_dev->cont_mutex);
kfree(msm_ais_mngr_dev);
diff --git a/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c
index fa7a93345575..885f09c85cb8 100644
--- a/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c
+++ b/drivers/media/platform/msm/ais/sensor/cci/msm_early_cam.c
@@ -163,59 +163,6 @@ int msm_ais_enable_clocks(void)
return rc;
}
- if (new_early_cam_dev->pdev->dev.of_node)
- of_property_read_u32((&new_early_cam_dev->pdev->dev)->of_node,
- "cell-index", &new_early_cam_dev->pdev->id);
-
- rc = msm_camera_get_clk_info_and_rates(new_early_cam_dev->pdev,
- &new_early_cam_dev->early_cam_clk_info,
- &new_early_cam_dev->early_cam_clk,
- &new_early_cam_dev->early_cam_clk_rates,
- &new_early_cam_dev->num_clk_cases,
- &new_early_cam_dev->num_clk);
- if (rc < 0) {
- pr_err("%s: msm_early_cam_get_clk_info() failed", __func__);
- return -EFAULT;
- }
-
- rc = msm_camera_get_dt_vreg_data(
- new_early_cam_dev->pdev->dev.of_node,
- &(new_early_cam_dev->early_cam_vreg),
- &(new_early_cam_dev->regulator_count));
- if (rc < 0) {
- pr_err("%s: msm_camera_get_dt_vreg_data fail\n", __func__);
- rc = -EFAULT;
- return rc;
- }
-
- if ((new_early_cam_dev->regulator_count < 0) ||
- (new_early_cam_dev->regulator_count > MAX_REGULATOR)) {
- pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
- new_early_cam_dev->regulator_count, MAX_REGULATOR);
- rc = -EFAULT;
- return rc;
- }
-
- rc = msm_camera_config_vreg(&new_early_cam_dev->pdev->dev,
- new_early_cam_dev->early_cam_vreg,
- new_early_cam_dev->regulator_count,
- NULL,
- 0,
- &new_early_cam_dev->early_cam_reg_ptr[0], 1);
- if (rc < 0)
- pr_err("%s:%d early_cam config_vreg failed\n", __func__,
- __LINE__);
-
- rc = msm_camera_enable_vreg(&new_early_cam_dev->pdev->dev,
- new_early_cam_dev->early_cam_vreg,
- new_early_cam_dev->regulator_count,
- NULL,
- 0,
- &new_early_cam_dev->early_cam_reg_ptr[0], 1);
- if (rc < 0)
- pr_err("%s:%d early_cam enable_vreg failed\n", __func__,
- __LINE__);
-
rc = msm_camera_clk_enable(&new_early_cam_dev->pdev->dev,
new_early_cam_dev->early_cam_clk_info,
new_early_cam_dev->early_cam_clk,
@@ -357,12 +304,10 @@ static int msm_early_cam_probe(struct platform_device *pdev)
of_property_read_u32((&pdev->dev)->of_node,
"cell-index", &pdev->id);
- rc = msm_camera_get_clk_info_and_rates(pdev,
- &new_early_cam_dev->early_cam_clk_info,
- &new_early_cam_dev->early_cam_clk,
- &new_early_cam_dev->early_cam_clk_rates,
- &new_early_cam_dev->num_clk_cases,
- &new_early_cam_dev->num_clk);
+ rc = msm_camera_get_clk_info(pdev,
+ &new_early_cam_dev->early_cam_clk_info,
+ &new_early_cam_dev->early_cam_clk,
+ &new_early_cam_dev->num_clk);
if (rc < 0) {
pr_err("%s: msm_early_cam_get_clk_info() failed", __func__);
kfree(new_early_cam_dev);
diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
index 392d902d3e0c..0486c8aa96d0 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
+++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,7 +56,12 @@ static struct csid_reg_parms_t csid_v3_5 = {
0xC,
0x84,
0xA4,
- 0x7f010800,
+	/*
+	 * IRQs enabled by default:
+	 * FIFO overflow, unbounded frame, stream underflow,
+	 * ECC error, CRC error, reset done
+	 */
+ 0x73000800,
20,
17,
16,
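
The default IRQ-enable word for CSID v3.5 is narrowed here from 0x7f010800 to 0x73000800, i.e. bits 16, 26 and 27 are no longer enabled by default (which hardware conditions those bits map to is not stated in this header). A trivial standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int old_mask = 0x7f010800, new_mask = 0x73000800;
	unsigned int dropped = old_mask & ~new_mask;
	int bit;

	for (bit = 0; bit < 32; bit++)
		if (dropped & (1u << bit))
			printf("bit %d no longer enabled by default\n", bit);
	return 0;	/* prints bits 16, 26 and 27 */
}
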
diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
index 2b3eefa65606..b820aa45136a 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,12 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqreturn.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include "msm_csid.h"
#include "msm_sd.h"
#include "msm_camera_io_util.h"
@@ -30,6 +36,7 @@
#include "include/msm_csid_3_6_0_hwreg.h"
#include "include/msm_csid_3_5_1_hwreg.h"
#include "cam_hw_ops.h"
+#include "msm_camera_diag_util.h"
#define V4L2_IDENT_CSID 50002
#define CSID_VERSION_V20 0x02000011
@@ -51,12 +58,13 @@
#define CSID_VERSION_V40 0x40000000
#define MSM_CSID_DRV_NAME "msm_csid"
-#define DBG_CSID 0
+#define DBG_CSID 1
#define SHORT_PKT_CAPTURE 0
#define SHORT_PKT_OFFSET 0x200
#define ENABLE_3P_BIT 1
#define SOF_DEBUG_ENABLE 1
#define SOF_DEBUG_DISABLE 0
+#define MAX_CSID_V4l2_EVENTS 100
#define TRUE 1
#define FALSE 0
@@ -156,6 +164,25 @@ static int msm_csid_stop(struct csid_device *csid_dev, uint32_t cid_mask)
return 0;
}
+static int msm_csid_send_event(struct csid_device *csid_dev,
+ uint32_t event_type, struct msm_csid_event_data *event_data)
+{
+ struct v4l2_event csid_event;
+ struct msm_csid_event_data *d_event_data;
+
+ memset(&csid_event, 0, sizeof(struct v4l2_event));
+ csid_event.id = 0;
+ csid_event.type = event_type;
+
+ d_event_data =
+ (struct msm_csid_event_data *)(&csid_event.u.data[0]);
+ d_event_data->csid_id = event_data->csid_id;
+ d_event_data->error_status = event_data->error_status;
+
+ v4l2_event_queue(csid_dev->msm_sd.sd.devnode, &csid_event);
+ return 0;
+}
+
#if (DBG_CSID)
static void msm_csid_set_debug_reg(struct csid_device *csid_dev,
struct msm_camera_csid_params *csid_params)
@@ -490,8 +517,9 @@ static irqreturn_t msm_csid_irq(int irq_num, void *data)
#else
static irqreturn_t msm_csid_irq(int irq_num, void *data)
{
- uint32_t irq;
+ uint32_t irq, error_irq, rst_done_irq_mask;
struct csid_device *csid_dev = data;
+ struct msm_csid_event_data csid_event;
if (!csid_dev) {
pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
@@ -509,11 +537,26 @@ static irqreturn_t msm_csid_irq(int irq_num, void *data)
irq = msm_camera_io_r(csid_dev->base +
csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ irq &= msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
__func__, csid_dev->pdev->id, irq);
- if (irq & (0x1 <<
- csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift))
+ error_irq = irq;
+ rst_done_irq_mask =
+ 0x1 << csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift;
+
+ if (irq & rst_done_irq_mask) {
complete(&csid_dev->reset_complete);
+ error_irq &= ~rst_done_irq_mask;
+ }
+
+ if (error_irq) {
+ csid_event.csid_id = csid_dev->pdev->id;
+ csid_event.error_status = error_irq;
+ msm_csid_send_event(csid_dev, CSID_EVENT_SIGNAL_ERROR,
+ &csid_event);
+ }
+
msm_camera_io_w(irq, csid_dev->base +
csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
return IRQ_HANDLED;
@@ -828,6 +871,20 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void *arg)
case CSID_STOP:
rc = msm_csid_stop(csid_dev, cdata->cfg.csid_cidmask);
break;
+ case CSID_READ_REG_LIST_CMD:
+ {
+ struct msm_camera_reg_list_cmd reg_list_cmd;
+
+ if (copy_from_user(&reg_list_cmd,
+ (void __user *)cdata->cfg.csid_reg_list_cmd,
+ sizeof(struct msm_camera_reg_list_cmd))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ rc = msm_camera_get_reg_list(csid_dev->base, &reg_list_cmd);
+ break;
+ }
default:
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -ENOIOCTLCMD;
@@ -890,7 +947,6 @@ static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd,
return rc;
}
-
#ifdef CONFIG_COMPAT
static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void *arg)
{
@@ -1061,27 +1117,135 @@ static long msm_csid_subdev_ioctl32(struct v4l2_subdev *sd,
mutex_unlock(&csid_dev->mutex);
return rc;
}
+#endif
-static long msm_csid_subdev_do_ioctl32(
+static long msm_csid_subdev_do_ioctl(
struct file *file, unsigned int cmd, void *arg)
{
+ int rc = -ENOIOCTLCMD;
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+ return v4l2_event_dequeue(vfh, arg,
+ file->f_flags & O_NONBLOCK);
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
- return msm_csid_subdev_ioctl32(sd, cmd, arg);
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+ case VIDIOC_MSM_CSID_IO_CFG32:
+#ifdef CONFIG_COMPAT
+ rc = msm_csid_subdev_ioctl32(sd, cmd, arg);
+#endif
+ break;
+
+ default:
+ rc = msm_csid_subdev_ioctl(sd, cmd, arg);
+ break;
+ }
+
+ return rc;
}
-static long msm_csid_subdev_fops_ioctl32(struct file *file, unsigned int cmd,
+static long msm_csid_subdev_fops_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl32);
+ return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl);
}
-#endif
+
+static u32 msm_csid_evt_mask_to_csid_event(u32 evt_mask)
+{
+ u32 evt_id = CSID_EVENT_SUBS_MASK_NONE;
+
+ switch (evt_mask) {
+ case CSID_EVENT_MASK_INDEX_SIGNAL_ERROR:
+ evt_id = CSID_EVENT_SIGNAL_ERROR;
+ break;
+ default:
+ evt_id = CSID_EVENT_SUBS_MASK_NONE;
+ break;
+ }
+
+ return evt_id;
+}
+
+static int msm_csid_subscribe_event_mask(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub, int evt_mask_index,
+ u32 evt_id, bool subscribe_flag)
+{
+ int rc = 0;
+
+ sub->type = evt_id;
+
+ if (subscribe_flag)
+ rc = v4l2_event_subscribe(fh, sub,
+ MAX_CSID_V4l2_EVENTS, NULL);
+ else
+ rc = v4l2_event_unsubscribe(fh, sub);
+ if (rc != 0) {
+ pr_err("%s: Subs event_type =0x%x failed\n",
+ __func__, sub->type);
+ return rc;
+ }
+ return rc;
+}
+
+static int msm_csid_process_event_subscription(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub, bool subscribe_flag)
+{
+ int rc = 0, evt_mask_index = 0;
+ u32 evt_mask = sub->type;
+ u32 evt_id = 0;
+
+ if (evt_mask == CSID_EVENT_SUBS_MASK_NONE) {
+ pr_err("%s: Subs event_type is None=0x%x\n",
+ __func__, evt_mask);
+ return 0;
+ }
+
+ evt_mask_index = CSID_EVENT_MASK_INDEX_SIGNAL_ERROR;
+ if (evt_mask & (1<<evt_mask_index)) {
+ evt_id =
+ msm_csid_evt_mask_to_csid_event(
+ evt_mask_index);
+ rc = msm_csid_subscribe_event_mask(fh, sub,
+ evt_mask_index, evt_id, subscribe_flag);
+ if (rc != 0) {
+ pr_err("%s: Subs event index:%d failed\n",
+ __func__, evt_mask_index);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+static int msm_csid_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return msm_csid_process_event_subscription(fh, sub, true);
+}
+
+static int msm_csid_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return msm_csid_process_event_subscription(fh, sub, false);
+}
+
static const struct v4l2_subdev_internal_ops msm_csid_internal_ops;
static struct v4l2_subdev_core_ops msm_csid_subdev_core_ops = {
.ioctl = &msm_csid_subdev_ioctl,
.interrupt_service_routine = msm_csid_irq_routine,
+ .subscribe_event = msm_csid_subscribe_event,
+ .unsubscribe_event = msm_csid_unsubscribe_event,
};
static const struct v4l2_subdev_ops msm_csid_subdev_ops = {
@@ -1175,6 +1339,7 @@ static int csid_probe(struct platform_device *pdev)
new_csid_dev->pdev = pdev;
new_csid_dev->msm_sd.sd.internal_ops = &msm_csid_internal_ops;
new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
snprintf(new_csid_dev->msm_sd.sd.name,
ARRAY_SIZE(new_csid_dev->msm_sd.sd.name), "msm_csid");
media_entity_init(&new_csid_dev->msm_sd.sd.entity, 0, NULL, 0);
@@ -1183,11 +1348,12 @@ static int csid_probe(struct platform_device *pdev)
new_csid_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x5;
msm_sd_register(&new_csid_dev->msm_sd);
-#ifdef CONFIG_COMPAT
msm_cam_copy_v4l2_subdev_fops(&msm_csid_v4l2_subdev_fops);
- msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl32;
- new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops;
+ msm_csid_v4l2_subdev_fops.unlocked_ioctl = msm_csid_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl;
#endif
+ new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops;
rc = msm_camera_register_irq(pdev, new_csid_dev->irq,
msm_csid_irq, IRQF_TRIGGER_RISING, "csid", new_csid_dev);
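
The msm_csid.c changes above expose CSID error interrupts to user space as V4L2 subdev events (CSID_EVENT_SIGNAL_ERROR, subscribed via a bit-mask type). A minimal user-space sketch of consuming them follows; the subdev node path, the mask-index value and the msm_csid_event_data layout are assumptions for illustration, and only the VIDIOC_* event ioctls are standard V4L2:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

#define CSID_EVENT_MASK_INDEX_SIGNAL_ERROR 0	/* assumed index */

struct msm_csid_event_data {			/* assumed layout */
	unsigned int csid_id;
	unsigned int error_status;
};

int main(void)
{
	int fd = open("/dev/v4l-subdev5", O_RDWR);	/* hypothetical node */
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;

	if (fd < 0)
		return 1;
	memset(&sub, 0, sizeof(sub));
	/* the driver treats sub.type as a mask of event indices */
	sub.type = 1 << CSID_EVENT_MASK_INDEX_SIGNAL_ERROR;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return 1;
	/* blocks until msm_csid_irq() queues an error event */
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0) {
		struct msm_csid_event_data *d =
			(struct msm_csid_event_data *)ev.u.data;
		printf("CSID%u error status 0x%x\n", d->csid_id,
		       d->error_status);
	}
	return 0;
}
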
diff --git a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
index c3b087f61888..ebf817149184 100644
--- a/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/ais/sensor/csiphy/msm_csiphy.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
#include "include/msm_csiphy_3_4_2_1_hwreg.h"
#include "include/msm_csiphy_3_5_hwreg.h"
#include "cam_hw_ops.h"
+#include "msm_camera_diag_util.h"
#define DBG_CSIPHY 0
#define SOF_DEBUG_ENABLE 1
@@ -1264,6 +1265,20 @@ static int32_t msm_csiphy_cmd(struct csiphy_device *csiphy_dev, void *arg)
}
break;
+ case CSIPHY_READ_REG_LIST_CMD:
+ {
+ struct msm_camera_reg_list_cmd reg_list_cmd;
+
+ if (copy_from_user(&reg_list_cmd,
+ (void __user *)cdata->cfg.csiphy_reg_list_cmd,
+ sizeof(struct msm_camera_reg_list_cmd))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ rc = msm_camera_get_reg_list(csiphy_dev->base, &reg_list_cmd);
+ break;
+ }
default:
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -ENOIOCTLCMD;
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.c b/drivers/media/platform/msm/ais/sensor/msm_sensor.c
index 2801fc3ed34e..7434ba49fb8d 100644
--- a/drivers/media/platform/msm/ais/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.c
@@ -296,7 +296,8 @@ static void msm_sensor_stop_stream(struct msm_sensor_ctrl_t *s_ctrl)
int32_t rc = 0;
mutex_lock(s_ctrl->msm_sensor_mutex);
- if (s_ctrl->sensor_state == MSM_SENSOR_POWER_UP) {
+ if (s_ctrl->sensor_state == MSM_SENSOR_POWER_UP ||
+ s_ctrl->sensor_state == MSM_SENSOR_CCI_UP) {
s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write_table(
s_ctrl->sensor_i2c_client, &s_ctrl->stop_setting);
kfree(s_ctrl->stop_setting.reg_setting);
@@ -511,7 +512,8 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
if (s_ctrl->is_csid_tg_mode)
goto DONE;
- if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
pr_err("%s:%d failed: invalid state %d\n", __func__,
__LINE__, s_ctrl->sensor_state);
rc = -EFAULT;
@@ -588,6 +590,14 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
read_config_ptr =
(struct msm_camera_i2c_read_config *)
compat_ptr(cdata->cfg.setting);
@@ -660,6 +670,14 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
if (copy_from_user(&write_config32,
(void __user *)compat_ptr(cdata->cfg.setting),
sizeof(
@@ -766,7 +784,8 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
if (s_ctrl->is_csid_tg_mode)
goto DONE;
- if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
pr_err("%s:%d failed: invalid state %d\n", __func__,
__LINE__, s_ctrl->sensor_state);
rc = -EFAULT;
@@ -852,7 +871,7 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
kfree(s_ctrl->stop_setting.reg_setting);
s_ctrl->stop_setting.reg_setting = NULL;
- if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ if (s_ctrl->sensor_state == MSM_SENSOR_POWER_DOWN) {
pr_err("%s:%d failed: invalid state %d\n", __func__,
__LINE__, s_ctrl->sensor_state);
rc = -EFAULT;
@@ -879,6 +898,12 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_CCI_DOWN) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
rc = msm_camera_cci_power_up(s_ctrl->sensor_device_type,
s_ctrl->sensor_i2c_client);
if (rc < 0) {
@@ -886,11 +911,21 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
__LINE__, rc);
break;
}
+ s_ctrl->sensor_state = MSM_SENSOR_CCI_UP;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
break;
case CFG_CCI_POWER_DOWN:
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
rc = msm_camera_cci_power_down(s_ctrl->sensor_device_type,
s_ctrl->sensor_i2c_client);
if (rc < 0) {
@@ -898,6 +933,9 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
__LINE__, rc);
break;
}
+ s_ctrl->sensor_state = MSM_SENSOR_CCI_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
break;
case CFG_SET_STOP_STREAM_SETTING: {
struct msm_camera_i2c_reg_setting32 stop_setting32;
@@ -908,6 +946,14 @@ static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
if (copy_from_user(&stop_setting32,
(void __user *)compat_ptr((cdata->cfg.setting)),
sizeof(struct msm_camera_i2c_reg_setting32))) {
@@ -1064,7 +1110,8 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
if (s_ctrl->is_csid_tg_mode)
goto DONE;
- if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
pr_err("%s:%d failed: invalid state %d\n", __func__,
__LINE__, s_ctrl->sensor_state);
rc = -EFAULT;
@@ -1134,6 +1181,14 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
read_config_ptr =
(struct msm_camera_i2c_read_config *)cdata->cfg.setting;
if (copy_from_user(&read_config, (void __user *)read_config_ptr,
@@ -1187,7 +1242,12 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
pr_err("%s:%d: i2c_read failed\n", __func__, __LINE__);
break;
}
- read_config_ptr->data = local_data;
+ if (copy_to_user((void __user *)&read_config_ptr->data,
+ &local_data, sizeof(local_data))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
break;
}
case CFG_SLAVE_WRITE_I2C_ARRAY: {
@@ -1199,6 +1259,14 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
if (copy_from_user(&write_config,
(void __user *)cdata->cfg.setting,
sizeof(struct msm_camera_i2c_array_write_config))) {
@@ -1282,7 +1350,8 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
if (s_ctrl->is_csid_tg_mode)
goto DONE;
- if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
pr_err("%s:%d failed: invalid state %d\n", __func__,
__LINE__, s_ctrl->sensor_state);
rc = -EFAULT;
@@ -1364,7 +1433,7 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
kfree(s_ctrl->stop_setting.reg_setting);
s_ctrl->stop_setting.reg_setting = NULL;
- if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ if (s_ctrl->sensor_state == MSM_SENSOR_POWER_DOWN) {
pr_err("%s:%d failed: invalid state %d\n", __func__,
__LINE__, s_ctrl->sensor_state);
rc = -EFAULT;
@@ -1392,6 +1461,12 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_CCI_DOWN) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
rc = msm_camera_cci_power_up(s_ctrl->sensor_device_type,
s_ctrl->sensor_i2c_client);
if (rc < 0) {
@@ -1399,12 +1474,22 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
__LINE__, rc);
break;
}
+ s_ctrl->sensor_state = MSM_SENSOR_CCI_UP;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
break;
case CFG_CCI_POWER_DOWN:
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
rc = msm_camera_cci_power_down(s_ctrl->sensor_device_type,
s_ctrl->sensor_i2c_client);
if (rc < 0) {
@@ -1412,6 +1497,9 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
__LINE__, rc);
break;
}
+ s_ctrl->sensor_state = MSM_SENSOR_CCI_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
break;
case CFG_SET_STOP_STREAM_SETTING: {
@@ -1422,6 +1510,14 @@ int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp)
if (s_ctrl->is_csid_tg_mode)
goto DONE;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP &&
+ s_ctrl->sensor_state != MSM_SENSOR_CCI_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
if (copy_from_user(stop_setting,
(void __user *)cdata->cfg.setting,
sizeof(struct msm_camera_i2c_reg_setting))) {
@@ -1522,7 +1618,8 @@ static int msm_sensor_power(struct v4l2_subdev *sd, int on)
struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd);
mutex_lock(s_ctrl->msm_sensor_mutex);
- if (!on && s_ctrl->sensor_state == MSM_SENSOR_POWER_UP) {
+ if (!on && (s_ctrl->sensor_state == MSM_SENSOR_POWER_UP ||
+ s_ctrl->sensor_state == MSM_SENSOR_CCI_UP)) {
s_ctrl->func_tbl->sensor_power_down(s_ctrl);
s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
}
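
One hunk above also replaces a direct store through read_config_ptr->data with copy_to_user(): read_config_ptr is taken from cdata->cfg.setting and therefore points into user memory, which kernel code must not write through directly. A minimal sketch of the corrected pattern under that assumption (the struct and function names here are illustrative only):

#include <linux/uaccess.h>

struct i2c_read_cfg {		/* illustrative stand-in for the real config */
	unsigned short addr;
	unsigned short data;
};

static int report_read_result(struct i2c_read_cfg __user *ucfg,
			      unsigned short local_data)
{
	/* ucfg->data = local_data;  <- unsafe: ucfg is a user pointer */
	if (copy_to_user(&ucfg->data, &local_data, sizeof(local_data)))
		return -EFAULT;
	return 0;
}
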
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.h b/drivers/media/platform/msm/ais/sensor/msm_sensor.h
index b742d06d3baa..ca6de8d0a7a8 100644
--- a/drivers/media/platform/msm/ais/sensor/msm_sensor.h
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,8 @@ struct msm_sensor_ctrl_t;
enum msm_sensor_state_t {
MSM_SENSOR_POWER_DOWN,
MSM_SENSOR_POWER_UP,
+ MSM_SENSOR_CCI_DOWN,
+ MSM_SENSOR_CCI_UP,
};
struct msm_sensor_fn_t {
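
The two new states are accepted alongside MSM_SENSOR_POWER_UP by the state checks added throughout msm_sensor.c above; a one-line helper capturing that rule would look roughly like the sketch below (hypothetical, not part of this patch):

/* Hypothetical helper mirroring the repeated state checks in msm_sensor.c. */
static inline bool msm_sensor_i2c_ready(enum msm_sensor_state_t state)
{
	/* I2C/CCI traffic is valid after either a full sensor power-up
	 * or the lighter CCI-only power-up.
	 */
	return state == MSM_SENSOR_POWER_UP || state == MSM_SENSOR_CCI_UP;
}
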
diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
index 5e34016d199c..03ae276d1a6f 100644
--- a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -639,7 +639,7 @@ static irqreturn_t bridge_irq(int irq, void *dev)
{
struct msm_sensor_ctrl_t *s_ctrl = dev;
- pr_err("msm_sensor_driver: received bridge interrupt:0x%x",
+ pr_debug("msm_sensor_driver: received bridge interrupt:0x%x\n",
s_ctrl->sensordata->slave_info->sensor_slave_addr);
schedule_delayed_work(&s_ctrl->irq_delayed_work,
msecs_to_jiffies(0));
@@ -682,7 +682,7 @@ static void bridge_irq_delay_work(struct work_struct *work)
&sensor_event);
mutex_unlock(s_ctrl->msm_sensor_mutex);
exit_queue:
- pr_err("Work IRQ exit");
+ pr_debug("Work IRQ exit\n");
}
/* static function definition */
@@ -947,8 +947,6 @@ CSID_TG:
goto free_camera_info;
}
- pr_err("%s probe succeeded", slave_info->sensor_name);
-
/*
* Update the subdevice id of flash-src based on availability in kernel.
*/
@@ -1009,8 +1007,6 @@ CSID_TG:
pr_err("%s: Failed gpio_direction irq %d",
__func__, rc);
goto cancel_work;
- } else {
- pr_err("sensor probe IRQ direction succeeded");
}
}
@@ -1035,7 +1031,7 @@ CSID_TG:
}
/* Keep irq enabled */
- pr_err("msm_sensor_driver.c irq number = %d", s_ctrl->irq);
+ pr_debug("msm_sensor_driver.c irq number = %d\n", s_ctrl->irq);
}
/*
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index d881b4aea48f..8e0a7443f98c 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -442,7 +442,7 @@ static int msm_fd_open(struct file *file)
}
ctx->mem_pool.fd_device = ctx->fd_device;
- ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS);
+ ctx->stats = vzalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS);
if (!ctx->stats) {
dev_err(device->dev, "No memory for face statistics\n");
ret = -ENOMEM;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index eab56b70e646..20a38925aa10 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/videobuf2-core.h>
+#include <media/msmb_generic_buf_mgr.h>
#include "msm.h"
#include "msm_buf_mgr.h"
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index d336e1ef1bd7..acf0a90ed93d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -155,9 +155,11 @@ struct msm_vfe_irq_ops {
struct msm_isp_timestamp *ts);
void (*process_axi_irq)(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status,
struct msm_isp_timestamp *ts);
void (*process_stats_irq)(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status,
struct msm_isp_timestamp *ts);
void (*config_irq)(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
@@ -596,6 +598,7 @@ struct msm_vfe_tasklet_queue_cmd {
struct list_head list;
uint32_t vfeInterruptStatus0;
uint32_t vfeInterruptStatus1;
+ uint32_t vfe_pingpong_status;
struct msm_isp_timestamp ts;
uint8_t cmd_used;
struct vfe_device *vfe_dev;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 717e3750dcca..850f1b032a8a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -1061,15 +1061,18 @@ static int msm_vfe40_start_fetch_engine(struct vfe_device *vfe_dev,
fe_cfg->stream_id);
vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
@@ -1122,14 +1125,15 @@ static int msm_vfe40_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 1d5035468134..3b8de1a13c88 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -895,13 +895,14 @@ static int msm_vfe44_fetch_engine_start(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0) {
pr_err("%s: No fetch buffer\n", __func__);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 42787c6c47db..f8866b01e617 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -836,14 +836,15 @@ static int msm_vfe46_start_fetch_engine(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 121f1ab8bcd1..0a969cc897b0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -570,6 +570,7 @@ void msm_vfe47_process_error_status(struct vfe_device *vfe_dev)
void msm_vfe47_read_and_clear_irq_status(struct vfe_device *vfe_dev,
uint32_t *irq_status0, uint32_t *irq_status1)
{
+ uint32_t count = 0;
*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
*irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
/* Mask off bits that are not enabled */
@@ -578,6 +579,14 @@ void msm_vfe47_read_and_clear_irq_status(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
*irq_status0 &= vfe_dev->irq0_mask;
*irq_status1 &= vfe_dev->irq1_mask;
+	/* if the status register did not clear, issue the clear command again */
+ while (*irq_status0 &&
+ (*irq_status0 & msm_camera_io_r(vfe_dev->vfe_base + 0x6C)) &&
+ (count < MAX_RECOVERY_THRESHOLD)) {
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
+ count++;
+ }
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
@@ -1095,15 +1104,18 @@ int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
fe_cfg->stream_id);
vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
@@ -1156,14 +1168,15 @@ int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
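
The same locking fix is applied to every VFE generation above (msm_isp40/44/46/47): buf_mgr->lock is now held across both the index lookup and the buffer-state update, so the buffer cannot be put back or freed between the two steps. Distilled from those hunks, the corrected pattern is:

	mutex_lock(&vfe_dev->buf_mgr->lock);
	rc = vfe_dev->buf_mgr->ops->get_buf_by_index(vfe_dev->buf_mgr,
			bufq_handle, fe_cfg->buf_idx, &buf);
	if (rc < 0 || !buf) {
		mutex_unlock(&vfe_dev->buf_mgr->lock);	/* unlock on error paths too */
		return -EINVAL;
	}
	mapped_info = buf->mapped_info[0];
	buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;	/* still under the lock */
	mutex_unlock(&vfe_dev->buf_mgr->lock);
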
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 15f8061b9919..925a89601636 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -236,6 +236,7 @@ static int msm_isp_validate_axi_request(struct vfe_device *vfe_dev,
case V4L2_PIX_FMT_META:
case V4L2_PIX_FMT_META10:
case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
stream_info->num_planes = 1;
stream_info->format_factor = ISP_Q2;
break;
@@ -345,6 +346,7 @@ static uint32_t msm_isp_axi_get_plane_size(
case V4L2_PIX_FMT_QGRBG10:
case V4L2_PIX_FMT_QRGGB10:
case V4L2_PIX_FMT_META10:
+ case V4L2_PIX_FMT_Y10:
/* TODO: fix me */
size = plane_cfg[plane_idx].output_height *
plane_cfg[plane_idx].output_width;
@@ -3095,12 +3097,18 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
return -EINVAL;
msm_isp_get_timestamp(&timestamp, vfe_dev_ioctl);
-
+ mutex_lock(&vfe_dev_ioctl->buf_mgr->lock);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
if (stream_cfg_cmd->stream_handle[i] == 0)
continue;
stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
+
+ if (!stream_info) {
+			pr_err("%s: stream_info is NULL\n", __func__);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
+ return -EINVAL;
+ }
if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
src_state = axi_data->src_info[
SRC_TO_INTF(stream_info->stream_src)].active;
@@ -3108,6 +3116,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
else {
ISP_DBG("%s: invalid src info index\n", __func__);
rc = -EINVAL;
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
spin_lock_irqsave(&stream_info->lock, flags);
@@ -3119,6 +3128,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
}
if (rc) {
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
@@ -3141,6 +3151,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
HANDLE_TO_IDX(
stream_cfg_cmd->stream_handle[i]));
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
for (k = 0; k < stream_info->num_isp; k++) {
@@ -3199,6 +3210,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
spin_unlock_irqrestore(&stream_info->lock, flags);
streams[num_streams++] = stream_info;
}
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
for (i = 0; i < MAX_VFE; i++) {
vfe_dev = update_vfes[i];
@@ -3722,13 +3734,6 @@ static int msm_isp_stream_axi_cfg_update(struct vfe_device *vfe_dev,
unsigned long flags;
int vfe_idx;
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[
- SRC_TO_INTF(stream_info->stream_src)])) {
- pr_err("%s: Update in progress for vfe %d intf %d\n",
- __func__, vfe_dev->pdev->id,
- SRC_TO_INTF(stream_info->stream_src));
- return -EINVAL;
- }
spin_lock_irqsave(&stream_info->lock, flags);
if (stream_info->state != ACTIVE) {
spin_unlock_irqrestore(&stream_info->lock, flags);
@@ -3951,10 +3956,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
&update_cmd->update_info[i];
stream_info = msm_isp_get_stream_common_data(vfe_dev,
HANDLE_TO_IDX(update_info->stream_handle));
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_request_frame(vfe_dev, stream_info,
update_info->user_stream_id,
update_info->frame_id,
MSM_ISP_INVALID_BUF_INDEX);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
@@ -4000,10 +4007,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
rc = -EINVAL;
break;
}
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_request_frame(vfe_dev, stream_info,
req_frm->user_stream_id,
req_frm->frame_id,
req_frm->buf_index);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
@@ -4202,11 +4211,11 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts)
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts)
{
int i, rc = 0;
uint32_t comp_mask = 0, wm_mask = 0;
- uint32_t pingpong_status, stream_idx;
+ uint32_t stream_idx;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_composite_info *comp_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
@@ -4220,8 +4229,6 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
return;
ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
- pingpong_status =
- vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
rc = 0;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index 0f029c0d5178..9794db5a1b9c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts);
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts);
void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index f0831e64f250..3e8220005f77 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -256,13 +256,12 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
uint32_t stats_irq_mask, struct msm_isp_timestamp *ts,
- bool is_composite)
+ uint32_t pingpong_status, bool is_composite)
{
int i, rc = 0;
struct msm_isp_event_data buf_event;
struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
struct msm_vfe_stats_stream *stream_info = NULL;
- uint32_t pingpong_status;
uint32_t comp_stats_type_mask = 0;
int result = 0;
@@ -271,8 +270,6 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
buf_event.mono_timestamp = ts->buf_time;
buf_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
- pingpong_status = vfe_dev->hw_info->
- vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
if (!(stats_irq_mask & (1 << i)))
@@ -309,7 +306,7 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts)
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts)
{
int j, rc;
uint32_t atomic_stats_mask = 0;
@@ -337,7 +334,7 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
/* Process non-composite irq */
if (stats_irq_mask) {
rc = msm_isp_stats_configure(vfe_dev, stats_irq_mask, ts,
- comp_flag);
+ pingpong_status, comp_flag);
}
/* Process composite irq */
@@ -350,7 +347,7 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
&vfe_dev->stats_data.stats_comp_mask[j]);
rc = msm_isp_stats_configure(vfe_dev, atomic_stats_mask,
- ts, !comp_flag);
+ ts, pingpong_status, !comp_flag);
}
}
}
@@ -1105,6 +1102,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
struct vfe_device *vfe_dev;
msm_isp_get_timestamp(&timestamp, vfe_dev_ioctl);
+ mutex_lock(&vfe_dev_ioctl->buf_mgr->lock);
num_stats_comp_mask =
vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask;
@@ -1123,6 +1121,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
}
if (rc) {
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
rc = msm_isp_init_stats_ping_pong_reg(
@@ -1130,6 +1129,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
if (rc < 0) {
spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
return rc;
}
init_completion(&stream_info->active_comp);
@@ -1164,6 +1164,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
stats_data->num_active_stream);
streams[num_stream++] = stream_info;
}
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
for (k = 0; k < MAX_VFE; k++) {
if (!update_vfes[k] || num_active_streams[k])
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index 2e3a24dd1f0d..3efd5b57a029 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,7 @@
void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts);
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts);
void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 242ed33b2b29..684b331d9ac4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -406,8 +406,10 @@ static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
0, 1);
msm_isp_reset_framedrop(vfe_dev, stream_info);
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
VFE_PING_FLAG, fe_cfg->output_buf_idx);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0) {
pr_err("%s: Fetch engine config failed\n", __func__);
return -EINVAL;
@@ -925,6 +927,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
case VIDIOC_MSM_ISP_AXI_RESTART:
mutex_lock(&vfe_dev->core_mutex);
MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
+ mutex_lock(&vfe_dev->buf_mgr->lock);
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_restart(vfe_dev);
@@ -935,6 +938,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
__func__);
}
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
@@ -2064,7 +2068,8 @@ void msm_isp_prepare_tasklet_debug_info(struct vfe_device *vfe_dev,
}
static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
- uint32_t irq_status0, uint32_t irq_status1)
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t ping_pong_status)
{
unsigned long flags;
struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;
@@ -2087,8 +2092,8 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
}
queue_cmd->vfeInterruptStatus0 = irq_status0;
queue_cmd->vfeInterruptStatus1 = irq_status1;
+ queue_cmd->vfe_pingpong_status = ping_pong_status;
msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
-
queue_cmd->cmd_used = 1;
queue_cmd->vfe_dev = vfe_dev;
@@ -2102,7 +2107,7 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
irqreturn_t msm_isp_process_irq(int irq_num, void *data)
{
struct vfe_device *vfe_dev = (struct vfe_device *) data;
- uint32_t irq_status0, irq_status1;
+ uint32_t irq_status0, irq_status1, ping_pong_status;
uint32_t error_mask0, error_mask1;
vfe_dev->hw_info->vfe_ops.irq_ops.
@@ -2113,6 +2118,8 @@ irqreturn_t msm_isp_process_irq(int irq_num, void *data)
__func__, vfe_dev->pdev->id);
return IRQ_HANDLED;
}
+ ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_pingpong_status(vfe_dev);
if (vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq) {
vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq(
vfe_dev, irq_status0);
@@ -2140,7 +2147,8 @@ irqreturn_t msm_isp_process_irq(int irq_num, void *data)
return IRQ_HANDLED;
}
msm_isp_prepare_irq_debug_info(vfe_dev, irq_status0, irq_status1);
- msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1);
+ msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1,
+ ping_pong_status);
return IRQ_HANDLED;
}
@@ -2153,7 +2161,7 @@ void msm_isp_do_tasklet(unsigned long data)
struct msm_vfe_irq_ops *irq_ops;
struct msm_vfe_tasklet_queue_cmd *queue_cmd;
struct msm_isp_timestamp ts;
- uint32_t irq_status0, irq_status1;
+ uint32_t irq_status0, irq_status1, pingpong_status;
while (1) {
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
@@ -2169,6 +2177,7 @@ void msm_isp_do_tasklet(unsigned long data)
queue_cmd->vfe_dev = NULL;
irq_status0 = queue_cmd->vfeInterruptStatus0;
irq_status1 = queue_cmd->vfeInterruptStatus1;
+ pingpong_status = queue_cmd->vfe_pingpong_status;
ts = queue_cmd->ts;
spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
if (vfe_dev->vfe_open_cnt == 0) {
@@ -2193,9 +2202,11 @@ void msm_isp_do_tasklet(unsigned long data)
}
msm_isp_process_error_info(vfe_dev);
irq_ops->process_stats_irq(vfe_dev,
- irq_status0, irq_status1, &ts);
+ irq_status0, irq_status1,
+ pingpong_status, &ts);
irq_ops->process_axi_irq(vfe_dev,
- irq_status0, irq_status1, &ts);
+ irq_status0, irq_status1,
+ pingpong_status, &ts);
irq_ops->process_camif_irq(vfe_dev,
irq_status0, irq_status1, &ts);
irq_ops->process_reg_update(vfe_dev,
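
Net effect of the msm_isp_util.c hunks: the ping-pong status is sampled once in hard-IRQ context, stored in the tasklet queue command, and handed unchanged to the AXI and stats handlers instead of being re-read later, when the register may already reflect a newer frame. Compressed, the flow added above is:

	/* hard IRQ: latch everything the deferred handler will need */
	ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
				get_pingpong_status(vfe_dev);
	queue_cmd->vfeInterruptStatus0 = irq_status0;
	queue_cmd->vfeInterruptStatus1 = irq_status1;
	queue_cmd->vfe_pingpong_status = ping_pong_status;

	/* tasklet: consume the latched value, no further register reads */
	irq_ops->process_axi_irq(vfe_dev, irq_status0, irq_status1,
				 queue_cmd->vfe_pingpong_status, &ts);
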
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
index f732f5180e81..58bfdb77a492 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -166,6 +166,33 @@ static int32_t msm_buf_mngr_buf_done(struct msm_buf_mngr_device *buf_mngr_dev,
return ret;
}
+static int32_t msm_buf_mngr_buf_error(struct msm_buf_mngr_device *buf_mngr_dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+ int32_t ret = -EINVAL;
+
+ spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags);
+ list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
+ if ((bufs->session_id == buf_info->session_id) &&
+ (bufs->stream_id == buf_info->stream_id) &&
+ (bufs->index == buf_info->index)) {
+ ret = buf_mngr_dev->vb2_ops.buf_error
+ (bufs->vb2_v4l2_buf,
+ buf_info->session_id,
+ buf_info->stream_id,
+ buf_info->frame_id,
+ &buf_info->timestamp,
+ buf_info->reserved);
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags);
+ return ret;
+}
static int32_t msm_buf_mngr_put_buf(struct msm_buf_mngr_device *buf_mngr_dev,
struct msm_buf_mngr_info *buf_info)
@@ -473,6 +500,9 @@ int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp)
case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
rc = msm_buf_mngr_buf_done(msm_buf_mngr_dev, argp);
break;
+ case VIDIOC_MSM_BUF_MNGR_BUF_ERROR:
+ rc = msm_buf_mngr_buf_error(msm_buf_mngr_dev, argp);
+ break;
case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
rc = msm_buf_mngr_put_buf(msm_buf_mngr_dev, argp);
break;
@@ -571,6 +601,7 @@ static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd,
case VIDIOC_MSM_BUF_MNGR_GET_BUF:
case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
+ case VIDIOC_MSM_BUF_MNGR_BUF_ERROR:
rc = msm_cam_buf_mgr_ops(cmd, argp);
break;
case VIDIOC_MSM_BUF_MNGR_INIT:
@@ -719,6 +750,9 @@ static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file,
case VIDIOC_MSM_BUF_MNGR_BUF_DONE32:
cmd = VIDIOC_MSM_BUF_MNGR_BUF_DONE;
break;
+ case VIDIOC_MSM_BUF_MNGR_BUF_ERROR32:
+ cmd = VIDIOC_MSM_BUF_MNGR_BUF_ERROR;
+ break;
case VIDIOC_MSM_BUF_MNGR_PUT_BUF32:
cmd = VIDIOC_MSM_BUF_MNGR_PUT_BUF;
break;
@@ -737,6 +771,7 @@ static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file,
switch (cmd) {
case VIDIOC_MSM_BUF_MNGR_GET_BUF:
case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
+ case VIDIOC_MSM_BUF_MNGR_BUF_ERROR:
case VIDIOC_MSM_BUF_MNGR_FLUSH:
case VIDIOC_MSM_BUF_MNGR_PUT_BUF: {
struct msm_buf_mngr_info32_t buf_info32;
diff --git a/drivers/media/platform/msm/camera_v2/msm_sd.h b/drivers/media/platform/msm/camera_v2/msm_sd.h
index d893d9fc07e3..3d5d3e03632e 100644
--- a/drivers/media/platform/msm/camera_v2/msm_sd.h
+++ b/drivers/media/platform/msm/camera_v2/msm_sd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -81,6 +81,9 @@ struct msm_sd_req_vb2_q {
unsigned int stream_id, uint32_t sequence, struct timeval *ts,
uint32_t reserved);
int (*flush_buf)(int session_id, unsigned int stream_id);
+ int (*buf_error)(struct vb2_v4l2_buffer *vb2_v4l2_buf, int session_id,
+ unsigned int stream_id, uint32_t sequence, struct timeval *ts,
+ uint32_t reserved);
};
#define MSM_SD_NOTIFY_GET_SD 0x00000001
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index e271c7fcd1b6..f2b048e37319 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -457,6 +457,67 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
return rc;
}
+static int msm_vb2_buf_error(struct vb2_v4l2_buffer *vb, int session_id,
+ unsigned int stream_id, uint32_t sequence,
+ struct timeval *ts, uint32_t buf_type)
+{
+ unsigned long flags, rl_flags;
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ struct msm_session *session;
+ struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
+ int rc = 0;
+
+ session = msm_get_session(session_id);
+ if (IS_ERR_OR_NULL(session))
+ return -EINVAL;
+
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
+
+ stream = msm_get_stream(session, stream_id);
+ if (IS_ERR_OR_NULL(stream)) {
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ if (vb) {
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf);
+ if (vb2_v4l2_buf == vb)
+ break;
+ }
+ if (vb2_v4l2_buf != vb) {
+ pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n",
+ session_id, stream_id, vb);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ read_unlock_irqrestore(&session->stream_rwlock,
+ rl_flags);
+ return -EINVAL;
+ }
+ msm_vb2 =
+ container_of(vb2_v4l2_buf, struct msm_vb2_buffer,
+ vb2_v4l2_buf);
+ /* put buf before buf done */
+ if (msm_vb2->in_freeq) {
+ vb2_v4l2_buf->sequence = sequence;
+ vb2_v4l2_buf->timestamp = *ts;
+ vb2_buffer_done(&vb2_v4l2_buf->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ msm_vb2->in_freeq = 0;
+ rc = 0;
+ } else
+ rc = -EINVAL;
+ } else {
+		pr_err("VB buffer is NULL for ses_id=%d, str_id=%d\n",
+ session_id, stream_id);
+ rc = -EINVAL;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
+ return rc;
+}
+
long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
uint32_t index)
{
@@ -555,6 +616,7 @@ int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req)
req->put_buf = msm_vb2_put_buf;
req->buf_done = msm_vb2_buf_done;
req->flush_buf = msm_vb2_flush_buf;
+ req->buf_error = msm_vb2_buf_error;
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 25fc34b26bc1..53a01aff4bdd 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1624,6 +1624,7 @@ static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
case VIDIOC_MSM_BUF_MNGR_GET_BUF:
+ case VIDIOC_MSM_BUF_MNGR_BUF_ERROR:
default: {
struct msm_buf_mngr_info *buff_mgr_info =
(struct msm_buf_mngr_info *)arg;
@@ -3617,7 +3618,7 @@ STREAM_BUFF_END:
break;
}
buff_mgr_info.frame_id = frame_info.frame_id;
- rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_ERROR,
0x0, &buff_mgr_info);
if (rc < 0) {
pr_err("error in buf done\n");
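
With the CPP error path above now completing buffers through VIDIOC_MSM_BUF_MNGR_BUF_ERROR, the vb2 buffer is returned in VB2_BUF_STATE_ERROR (see msm_vb2_buf_error earlier in this patch), which normally surfaces to user space as V4L2_BUF_FLAG_ERROR on dequeue. A hedged user-space check, with device and queue setup omitted and standard vb2 flag propagation assumed:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns 0 for a good frame, 1 for a frame flagged bad, -1 on failure. */
static int dqbuf_checked(int fd, struct v4l2_buffer *buf)
{
	if (ioctl(fd, VIDIOC_DQBUF, buf) < 0)
		return -1;
	if (buf->flags & V4L2_BUF_FLAG_ERROR)
		return 1;	/* payload should be treated as invalid */
	return 0;
}
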
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
index 6ed5c5c7dbce..4f55f4bc7c4a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
@@ -14,7 +14,6 @@
#define MSM_CSIPHY_3_5_HWREG_H
#define ULPM_WAKE_UP_TIMER_MODE 2
-#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */
#include <sensor/csiphy/msm_csiphy.h>
@@ -47,13 +46,13 @@ struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = {
{0x138, 0x0},
{0x13C, 0x10},
{0x140, 0x1},
- {0x144, GLITCH_ELIMINATION_NUM},
+ {0x144, 0x32},
{0x148, 0xFE},
{0x14C, 0x1},
{0x154, 0x0},
{0x15C, 0x23},
{0x160, ULPM_WAKE_UP_TIMER_MODE},
- {0x164, 0x48},
+ {0x164, 0x50},
{0x168, 0x70},
{0x16C, 0x17},
{0x170, 0x41},
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index a8d7c1f8b489..4f7a62716810 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@
#define MAX_DPHY_DATA_LN 4
#define CLOCK_OFFSET 0x700
#define CSIPHY_SOF_DEBUG_COUNT 2
+#define GBPS 1000000000
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -134,8 +135,10 @@ static int msm_csiphy_3phase_lane_config(
uint8_t i = 0;
uint16_t lane_mask = 0, lane_enable = 0, temp;
void __iomem *csiphybase;
+ uint64_t two_gbps = 0;
csiphybase = csiphy_dev->base;
+ two_gbps = 2 * (uint64_t)csiphy_params->lane_cnt * GBPS;
lane_mask = csiphy_params->lane_mask & 0x7;
while (lane_mask != 0) {
temp = (i << 1)+1;
@@ -281,11 +284,20 @@ static int msm_csiphy_3phase_lane_config(
csiphy_3ph_reg.mipi_csiphy_3ph_lnn_ctrl51.addr +
0x200*i);
}
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_3ph_lnn_ctrl25.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i);
+ if ((csiphy_dev->hw_version == CSIPHY_VERSION_V35) &&
+ (csiphy_params->data_rate > two_gbps)) {
+ msm_camera_io_w(0x40,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i);
+ } else {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl25.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i);
+ }
lane_mask >>= 1;
i++;
}
@@ -797,10 +809,10 @@ static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev,
ratio = csiphy_dev->csiphy_max_clk/clk_rate;
csiphy_params->settle_cnt = csiphy_params->settle_cnt/ratio;
}
- CDBG("%s csiphy_params, mask = 0x%x cnt = %d\n",
+ CDBG("%s csiphy_params, mask = 0x%x cnt = %d, data rate = %llu\n",
__func__,
csiphy_params->lane_mask,
- csiphy_params->lane_cnt);
+ csiphy_params->lane_cnt, csiphy_params->data_rate);
CDBG("%s csiphy_params, settle cnt = 0x%x csid %d\n",
__func__, csiphy_params->settle_cnt,
csiphy_params->csid_core);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 422c7a590a45..2dbf02e1e47a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -540,7 +540,7 @@ static int _sde_rotator_secure_session_ctrl(bool enable)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
uint32_t sid_info;
- struct scm_desc desc;
+ struct scm_desc desc = {0};
unsigned int resp = 0;
int ret = 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 1966fa9805c0..a2381557070d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -482,6 +482,11 @@ static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff,
if (__sde_rot_evtlog_dump_calc_range()) {
len = sde_rot_evtlog_dump_entry(evtlog_buf,
SDE_ROT_EVTLOG_BUF_MAX);
+ if (len < 0 || len > count) {
+ pr_err("len is more than the user buffer size\n");
+ return 0;
+ }
+
if (copy_to_user(buff, evtlog_buf, len))
return -EFAULT;
*ppos += len;
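[Editor's note, not part of the patch: the added check rejects a formatted length that is negative or larger than the caller's buffer before copying it out. A hedged userspace analogue of the same "validate length before copy" pattern, with made-up names:]

/* Illustrative sketch: validate a formatted length against the destination
 * buffer before copying. Not part of the patch above. */
#include <stdio.h>
#include <string.h>

static int dump_read(char *dst, size_t count)
{
	char tmp[128];
	int len = snprintf(tmp, sizeof(tmp), "event log entry %d\n", 42);

	if (len < 0 || (size_t)len > count)
		return 0;	/* refuse to overrun the caller's buffer */

	memcpy(dst, tmp, len);
	return len;
}

int main(void)
{
	char buf[64];
	int n = dump_read(buf, sizeof(buf));

	fwrite(buf, 1, n, stdout);
	return 0;
}
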
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 02bfc459614f..c288568edfbd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index f92687f0a2fa..3af6e53b21e7 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1135,8 +1135,7 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
rc = msm_comm_g_ctrl_for_id(inst,
V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER);
- if (!is_thumbnail_session(inst) &&
- (IS_ERR_VALUE(rc) || rc == false))
+ if ((IS_ERR_VALUE(rc) || rc == false))
event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
else
event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
@@ -3117,7 +3116,7 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
{
int rc = 0;
struct msm_smem *handle;
- struct internal_buf *binfo;
+ struct internal_buf *binfo = NULL;
u32 smem_flags = 0, buffer_size;
struct hal_buffer_requirements *output_buf, *extradata_buf;
int i;
@@ -3223,10 +3222,10 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
}
return rc;
fail_set_buffers:
- kfree(binfo);
-fail_kzalloc:
msm_comm_smem_free(inst, handle);
err_no_mem:
+ kfree(binfo);
+fail_kzalloc:
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 5658df95db26..ef1898dd36c3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 4cb900bbca10..2a4033598bd2 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index bda29bc1b933..2f74a5ac0147 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -405,3 +405,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd,
mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
}
EXPORT_SYMBOL(soc_camera_calc_client_output);
+
+MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 8490a65ae1c6..a43404cad3e3 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -83,7 +83,7 @@ static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
static void channel_swdemux_tsklet(unsigned long data)
{
struct channel_info *channel = (struct channel_info *)data;
- struct c8sectpfei *fei = channel->fei;
+ struct c8sectpfei *fei;
unsigned long wp, rp;
int pos, num_packets, n, size;
u8 *buf;
@@ -91,6 +91,8 @@ static void channel_swdemux_tsklet(unsigned long data)
if (unlikely(!channel || !channel->irec))
return;
+ fei = channel->fei;
+
wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
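[Editor's note, not part of the patch: the hunk above defers the channel->fei load until after the NULL check on channel. A trivial, self-contained sketch of that ordering, with made-up names:]

/* Illustrative sketch: don't dereference a pointer in its initializer when
 * the very next statement checks it for NULL. Not part of the patch above. */
#include <stddef.h>
#include <stdio.h>

struct channel { struct ctx *ctx; int irec; };

static void handle(struct channel *ch)
{
	struct ctx *ctx;		/* was: struct ctx *ctx = ch->ctx; */

	if (ch == NULL || ch->irec == 0)
		return;

	ctx = ch->ctx;			/* safe: ch is known valid here */
	(void)ctx;
	printf("handled irec %d\n", ch->irec);
}

int main(void)
{
	handle(NULL);			/* no crash: checked before dereference */
	return 0;
}
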
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index f838d9c7ed12..0fba4a2c1602 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1370,8 +1370,13 @@ static int mceusb_dev_probe(struct usb_interface *intf,
goto rc_dev_fail;
/* wire up inbound data handler */
- usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
- mceusb_dev_recv, ir, ep_in->bInterval);
+ if (usb_endpoint_xfer_int(ep_in))
+ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir, ep_in->bInterval);
+ else
+ usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir);
+
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index a7a8452e99d2..c1ce8d3ce877 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -410,9 +410,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
return 0;
}
-static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
+static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
{
- return r820t_write(priv, reg, &val, 1);
+ u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
+
+ return r820t_write(priv, reg, &tmp, 1);
}
static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
@@ -425,17 +427,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
return -EINVAL;
}
-static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
+static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
u8 bit_mask)
{
+ u8 tmp = val;
int rc = r820t_read_cache_reg(priv, reg);
if (rc < 0)
return rc;
- val = (rc & ~bit_mask) | (val & bit_mask);
+ tmp = (rc & ~bit_mask) | (tmp & bit_mask);
- return r820t_write(priv, reg, &val, 1);
+ return r820t_write(priv, reg, &tmp, 1);
}
static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)
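[Editor's note, not part of the patch: the r820t change copies the by-value parameter into a local and passes the local's address, working around GCC PR81715 with asan-stack=1. A minimal, self-contained sketch of that pattern, with made-up names:]

/* Illustrative sketch: copy a by-value parameter to a local before taking
 * its address. Not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

static int bus_write(uint8_t reg, const uint8_t *buf, int len)
{
	printf("reg 0x%02x <- 0x%02x (%d byte)\n", reg, buf[0], len);
	return 0;
}

static inline int write_reg(uint8_t reg, uint8_t val)
{
	uint8_t tmp = val;	/* avoid taking the address of the parameter */

	return bus_write(reg, &tmp, 1);
}

int main(void)
{
	return write_reg(0x1a, 0x32);
}
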
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 9caea8344547..d793c630f1dd 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct camera_data *cam = video_drvdata(file);
if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- buf->index > cam->num_frames)
+ buf->index >= cam->num_frames)
return -EINVAL;
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
@@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
buf->memory != V4L2_MEMORY_MMAP ||
- buf->index > cam->num_frames)
+ buf->index >= cam->num_frames)
return -EINVAL;
DBG("QBUF #%d\n", buf->index);
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 3721ee63b8fb..09c97847bf95 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -503,18 +503,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
static int lme2510_return_status(struct dvb_usb_device *d)
{
- int ret = 0;
+ int ret;
u8 *data;
- data = kzalloc(10, GFP_KERNEL);
+ data = kzalloc(6, GFP_KERNEL);
if (!data)
return -ENOMEM;
- ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
- 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
- info("Firmware Status: %x (%x)", ret , data[2]);
+ ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
+ 0x06, 0x80, 0x0302, 0x00,
+ data, 0x6, 200);
+ if (ret != 6)
+ ret = -EINVAL;
+ else
+ ret = data[2];
+
+ info("Firmware Status: %6ph", data);
- ret = (ret < 0) ? -ENODEV : data[2];
kfree(data);
return ret;
}
@@ -1078,8 +1083,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
if (adap->fe[0]) {
info("FE Found M88RS2000");
- dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
- &d->i2c_adap);
st->i2c_tuner_gate_w = 5;
st->i2c_tuner_gate_r = 5;
st->i2c_tuner_addr = 0x60;
@@ -1145,17 +1148,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
ret = st->tuner_config;
break;
case TUNER_RS2000:
- ret = st->tuner_config;
+ if (dvb_attach(ts2020_attach, adap->fe[0],
+ &ts2020_config, &d->i2c_adap))
+ ret = st->tuner_config;
break;
default:
break;
}
- if (ret)
+ if (ret) {
info("TUN Found %s tuner", tun_msg[ret]);
- else {
- info("TUN No tuner found --- resetting device");
- lme_coldreset(d);
+ } else {
+ info("TUN No tuner found");
return -ENODEV;
}
@@ -1199,6 +1203,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d)
static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
{
struct lme2510_state *st = d->priv;
+ int status;
usb_reset_configuration(d->udev);
@@ -1207,12 +1212,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;
- if (lme2510_return_status(d) == 0x44) {
+ status = lme2510_return_status(d);
+ if (status == 0x44) {
*name = lme_firmware_switch(d, 0);
return COLD;
}
- return 0;
+ if (status != 0x47)
+ return -EINVAL;
+
+ return WARM;
}
static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
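[Editor's note, not part of the patch: lme2510_return_status() now requires that the control transfer returned exactly the requested 6 bytes instead of OR-ing the return value into the status. A hedged userspace analogue of "short transfer is an error", where read_ctrl() stands in for usb_control_msg():]

/* Illustrative sketch: treat a short transfer as an error instead of OR-ing
 * return codes together. Not part of the patch above. */
#include <stdio.h>
#include <string.h>

static int read_ctrl(unsigned char *buf, int len)
{
	static const unsigned char fw[6] = { 0, 0, 0x47, 0, 0, 0 };
	int n = len < 6 ? len : 6;

	memcpy(buf, fw, n);
	return n;			/* bytes actually transferred */
}

static int return_status(void)
{
	unsigned char data[6];
	int ret = read_ctrl(data, sizeof(data));

	if (ret != (int)sizeof(data))
		return -1;		/* short transfer: don't trust data[] */
	return data[2];			/* firmware status byte */
}

int main(void)
{
	printf("status 0x%02x\n", return_status());
	return 0;
}
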
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index ab7151181728..d00b27ed73a6 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -818,6 +818,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
case XC2028_RESET_CLK:
deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
break;
+ case XC2028_I2C_FLUSH:
+ break;
default:
deb_info("%s: unknown command %d, arg %d\n", __func__,
command, arg);
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 7df0707a0455..38c03283a441 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -431,6 +431,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component,
state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
break;
case XC2028_RESET_CLK:
+ case XC2028_I2C_FLUSH:
break;
default:
err("%s: unknown command %d, arg %d\n", __func__,
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index e382210c4ada..75323f5efd0f 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_EM28XX_V4L2
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
---help---
This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
index 95a3af644a92..af1d02430931 100644
--- a/drivers/media/usb/go7007/Kconfig
+++ b/drivers/media/usb/go7007/Kconfig
@@ -11,7 +11,7 @@ config VIDEO_GO7007
select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
---help---
This is a video4linux driver for the WIS GO7007 MPEG
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 3fc64197b4e6..08f0ca7aa012 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -273,7 +273,9 @@ static int hdpvr_probe(struct usb_interface *interface,
struct hdpvr_device *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
+#if IS_ENABLED(CONFIG_I2C)
struct i2c_client *client;
+#endif
size_t buffer_size;
int i;
int retval = -ENOMEM;
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 58f23bcfe94e..299750e56916 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1119,8 +1119,10 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return 0;
+#ifdef CONFIG_USB_PWC_INPUT_EVDEV
err_video_unreg:
video_unregister_device(&pdev->vdev);
+#endif
err_unregister_v4l2_dev:
v4l2_device_unregister(&pdev->v4l2_dev);
err_free_controls:
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index 3bbc77aa6a33..483457d4904f 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -95,6 +95,8 @@ static int usbtv_probe(struct usb_interface *intf,
return 0;
usbtv_audio_fail:
+ /* we must not free at this point */
+ usb_get_dev(usbtv->udev);
usbtv_video_free(usbtv);
usbtv_video_fail:
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 9beece00869b..29b3436d0910 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -37,7 +37,6 @@ config VIDEO_PCI_SKELETON
# Used by drivers that need tuner.ko
config VIDEO_TUNER
tristate
- depends on MEDIA_TUNER
# Used by drivers that need v4l2-mem2mem.ko
config V4L2_MEM2MEM_DEV
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 18045a7e24e0..5cbd15742050 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -18,8 +18,18 @@
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
+/* Use the same argument order as copy_in_user */
+#define assign_in_user(to, from) \
+({ \
+ typeof(*from) __assign_tmp; \
+ \
+ get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
+})
+
static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret = -ENOIOCTLCMD;
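[Editor's note, not part of the patch: assign_in_user() above copies a single field between two __user structures through a typeof() temporary so the source and destination may have different widths. A hedged userspace analogue of the same typeof()/statement-expression idiom (GNU C, illustrative only; the kernel version goes through get_user/put_user):]

/* Illustrative userspace analogue of the typeof()-based field copy.
 * Not part of the patch above. */
#include <stdio.h>

#define assign_field(to, from)				\
({							\
	typeof(*(from)) __assign_tmp = *(from);		\
	*(to) = __assign_tmp;				\
	0;	/* mimic the 0-on-success convention */	\
})

struct fmt32 { unsigned int field; };
struct fmt64 { unsigned long field; };

int main(void)
{
	struct fmt32 src = { .field = 7 };
	struct fmt64 dst = { 0 };

	if (assign_field(&dst.field, &src.field))
		return 1;
	printf("copied field = %lu\n", dst.field);
	return 0;
}
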
@@ -33,131 +43,90 @@ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct v4l2_clip32 {
struct v4l2_rect c;
- compat_caddr_t next;
+ compat_caddr_t next;
};
struct v4l2_window32 {
struct v4l2_rect w;
- __u32 field; /* enum v4l2_field */
+ __u32 field; /* enum v4l2_field */
__u32 chromakey;
compat_caddr_t clips; /* actually struct v4l2_clip32 * */
__u32 clipcount;
compat_caddr_t bitmap;
+ __u8 global_alpha;
};
-static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
-{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
- copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
- get_user(kp->field, &up->field) ||
- get_user(kp->chromakey, &up->chromakey) ||
- get_user(kp->clipcount, &up->clipcount))
- return -EFAULT;
- if (kp->clipcount > 2048)
- return -EINVAL;
- if (kp->clipcount) {
- struct v4l2_clip32 __user *uclips;
- struct v4l2_clip __user *kclips;
- int n = kp->clipcount;
- compat_caddr_t p;
-
- if (get_user(p, &up->clips))
- return -EFAULT;
- uclips = compat_ptr(p);
- kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
- kp->clips = kclips;
- while (--n >= 0) {
- if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
- return -EFAULT;
- if (put_user(n ? kclips + 1 : NULL, &kclips->next))
- return -EFAULT;
- uclips += 1;
- kclips += 1;
- }
- } else
- kp->clips = NULL;
- return 0;
-}
-
-static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+static int get_v4l2_window32(struct v4l2_window __user *kp,
+ struct v4l2_window32 __user *up,
+ void __user *aux_buf, u32 aux_space)
{
- if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
- put_user(kp->field, &up->field) ||
- put_user(kp->chromakey, &up->chromakey) ||
- put_user(kp->clipcount, &up->clipcount))
- return -EFAULT;
- return 0;
-}
-
-static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
- return -EFAULT;
- return 0;
-}
-
-static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
- struct v4l2_pix_format_mplane __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
- return -EFAULT;
- return 0;
-}
-
-static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
-{
- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+ struct v4l2_clip32 __user *uclips;
+ struct v4l2_clip __user *kclips;
+ compat_caddr_t p;
+ u32 clipcount;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
+ assign_in_user(&kp->field, &up->field) ||
+ assign_in_user(&kp->chromakey, &up->chromakey) ||
+ assign_in_user(&kp->global_alpha, &up->global_alpha) ||
+ get_user(clipcount, &up->clipcount) ||
+ put_user(clipcount, &kp->clipcount))
return -EFAULT;
- return 0;
-}
+ if (clipcount > 2048)
+ return -EINVAL;
+ if (!clipcount)
+ return put_user(NULL, &kp->clips);
-static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
- struct v4l2_pix_format_mplane __user *up)
-{
- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
+ if (get_user(p, &up->clips))
return -EFAULT;
- return 0;
-}
-
-static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
+ uclips = compat_ptr(p);
+ if (aux_space < clipcount * sizeof(*kclips))
return -EFAULT;
- return 0;
-}
-
-static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
-{
- if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
+ kclips = aux_buf;
+ if (put_user(kclips, &kp->clips))
return -EFAULT;
- return 0;
-}
-static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
- return -EFAULT;
+ while (clipcount--) {
+ if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
+ return -EFAULT;
+ if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next))
+ return -EFAULT;
+ uclips++;
+ kclips++;
+ }
return 0;
}
-static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+static int put_v4l2_window32(struct v4l2_window __user *kp,
+ struct v4l2_window32 __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
+ struct v4l2_clip __user *kclips;
+ struct v4l2_clip32 __user *uclips;
+ compat_caddr_t p;
+ u32 clipcount;
+
+ if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) ||
+ assign_in_user(&up->field, &kp->field) ||
+ assign_in_user(&up->chromakey, &kp->chromakey) ||
+ assign_in_user(&up->global_alpha, &kp->global_alpha) ||
+ get_user(clipcount, &kp->clipcount) ||
+ put_user(clipcount, &up->clipcount))
return -EFAULT;
- return 0;
-}
+ if (!clipcount)
+ return 0;
-static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
-{
- if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
+ if (get_user(kclips, &kp->clips))
return -EFAULT;
- return 0;
-}
-
-static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
-{
- if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
+ if (get_user(p, &up->clips))
return -EFAULT;
+ uclips = compat_ptr(p);
+ while (clipcount--) {
+ if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c)))
+ return -EFAULT;
+ uclips++;
+ kclips++;
+ }
return 0;
}
@@ -191,97 +160,158 @@ struct v4l2_create_buffers32 {
__u32 reserved[8];
};
-static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
+{
+ u32 type;
+
+ if (get_user(type, &up->type))
+ return -EFAULT;
+
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: {
+ u32 clipcount;
+
+ if (get_user(clipcount, &up->fmt.win.clipcount))
+ return -EFAULT;
+ if (clipcount > 2048)
+ return -EINVAL;
+ *size = clipcount * sizeof(struct v4l2_clip);
+ return 0;
+ }
+ default:
+ *size = 0;
+ return 0;
+ }
+}
+
+static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
{
- if (get_user(kp->type, &up->type))
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)))
return -EFAULT;
+ return __bufsize_v4l2_format(up, size);
+}
- switch (kp->type) {
+static int __get_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up,
+ void __user *aux_buf, u32 aux_space)
+{
+ u32 type;
+
+ if (get_user(type, &up->type) || put_user(type, &kp->type))
+ return -EFAULT;
+
+ switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ return copy_in_user(&kp->fmt.pix, &up->fmt.pix,
+ sizeof(kp->fmt.pix)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
- &up->fmt.pix_mp);
+ return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp,
+ sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ return get_v4l2_window32(&kp->fmt.win, &up->fmt.win,
+ aux_buf, aux_space);
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
- return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi,
+ sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced,
+ sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
- return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
+ return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr,
+ sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
default:
- pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
- kp->type);
return -EINVAL;
}
}
-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int get_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up,
+ void __user *aux_buf, u32 aux_space)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)))
return -EFAULT;
- return __get_v4l2_format32(kp, up);
+ return __get_v4l2_format32(kp, up, aux_buf, aux_space);
}
-static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up,
+ u32 *size)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)))
return -EFAULT;
- return __get_v4l2_format32(&kp->format, &up->format);
+ return __bufsize_v4l2_format(&up->format, size);
}
-static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int get_v4l2_create32(struct v4l2_create_buffers __user *kp,
+ struct v4l2_create_buffers32 __user *up,
+ void __user *aux_buf, u32 aux_space)
{
- if (put_user(kp->type, &up->type))
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ copy_in_user(kp, up,
+ offsetof(struct v4l2_create_buffers32, format)))
return -EFAULT;
+ return __get_v4l2_format32(&kp->format, &up->format,
+ aux_buf, aux_space);
+}
+
+static int __put_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up)
+{
+ u32 type;
- switch (kp->type) {
+ if (get_user(type, &kp->type))
+ return -EFAULT;
+
+ switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ return copy_in_user(&up->fmt.pix, &kp->fmt.pix,
+ sizeof(kp->fmt.pix)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
- &up->fmt.pix_mp);
+ return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp,
+ sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
- return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi,
+ sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
+ return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced,
+ sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
- return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
+ return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr,
+ sizeof(kp->fmt.sdr)) ? -EFAULT : 0;
default:
- pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
- kp->type);
return -EINVAL;
}
}
-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int put_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
return -EFAULT;
return __put_v4l2_format32(kp, up);
}
-static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int put_v4l2_create32(struct v4l2_create_buffers __user *kp,
+ struct v4l2_create_buffers32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
- copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
+ copy_in_user(up, kp,
+ offsetof(struct v4l2_create_buffers32, format)) ||
+ copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
return -EFAULT;
return __put_v4l2_format32(&kp->format, &up->format);
}
@@ -295,25 +325,28 @@ struct v4l2_standard32 {
__u32 reserved[4];
};
-static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int get_v4l2_standard32(struct v4l2_standard __user *kp,
+ struct v4l2_standard32 __user *up)
{
/* other fields are not set by the user, nor used by the driver */
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
- get_user(kp->index, &up->index))
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ assign_in_user(&kp->index, &up->index))
return -EFAULT;
return 0;
}
-static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int put_v4l2_standard32(struct v4l2_standard __user *kp,
+ struct v4l2_standard32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
- put_user(kp->index, &up->index) ||
- put_user(kp->id, &up->id) ||
- copy_to_user(up->name, kp->name, 24) ||
- copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
- put_user(kp->framelines, &up->framelines) ||
- copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
- return -EFAULT;
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
+ assign_in_user(&up->index, &kp->index) ||
+ assign_in_user(&up->id, &kp->id) ||
+ copy_in_user(up->name, kp->name, sizeof(up->name)) ||
+ copy_in_user(&up->frameperiod, &kp->frameperiod,
+ sizeof(up->frameperiod)) ||
+ assign_in_user(&up->framelines, &kp->framelines) ||
+ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+ return -EFAULT;
return 0;
}
@@ -352,144 +385,192 @@ struct v4l2_buffer32 {
__u32 reserved;
};
-static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
- enum v4l2_memory memory)
+static int get_v4l2_plane32(struct v4l2_plane __user *up,
+ struct v4l2_plane32 __user *up32,
+ enum v4l2_memory memory)
{
- void __user *up_pln;
- compat_long_t p;
+ compat_ulong_t p;
if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
- copy_in_user(&up->data_offset, &up32->data_offset,
- sizeof(__u32)) ||
- copy_in_user(up->reserved, up32->reserved,
- sizeof(up->reserved)) ||
- copy_in_user(&up->length, &up32->length,
- sizeof(__u32)))
+ copy_in_user(&up->data_offset, &up32->data_offset,
+ sizeof(up->data_offset)) ||
+ copy_in_user(up->reserved, up32->reserved,
+ sizeof(up->reserved)) ||
+ copy_in_user(&up->length, &up32->length,
+ sizeof(up->length)))
return -EFAULT;
- if (memory == V4L2_MEMORY_USERPTR) {
- if (get_user(p, &up32->m.userptr))
- return -EFAULT;
- up_pln = compat_ptr(p);
- if (put_user((unsigned long)up_pln, &up->m.userptr))
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
+ sizeof(up32->m.mem_offset)))
return -EFAULT;
- } else if (memory == V4L2_MEMORY_DMABUF) {
- if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (get_user(p, &up32->m.userptr) ||
+ put_user((unsigned long)compat_ptr(p), &up->m.userptr))
return -EFAULT;
- } else {
- if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
- sizeof(__u32)))
+ break;
+ case V4L2_MEMORY_DMABUF:
+ if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd)))
return -EFAULT;
+ break;
}
return 0;
}
-static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
- enum v4l2_memory memory)
+static int put_v4l2_plane32(struct v4l2_plane __user *up,
+ struct v4l2_plane32 __user *up32,
+ enum v4l2_memory memory)
{
+ unsigned long p;
+
if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
- copy_in_user(up32->reserved, up->reserved,
- sizeof(up32->reserved)) ||
- copy_in_user(&up32->data_offset, &up->data_offset,
- sizeof(__u32)))
+ copy_in_user(up32->reserved, up->reserved,
+ sizeof(up32->reserved)) ||
+ copy_in_user(&up32->data_offset, &up->data_offset,
+ sizeof(up->data_offset)))
return -EFAULT;
- /* For MMAP, driver might've set up the offset, so copy it back.
- * USERPTR stays the same (was userspace-provided), so no copying. */
- if (memory == V4L2_MEMORY_MMAP)
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
- sizeof(__u32)))
+ sizeof(up->m.mem_offset)))
return -EFAULT;
- /* For DMABUF, driver might've set up the fd, so copy it back. */
- if (memory == V4L2_MEMORY_DMABUF)
- if (copy_in_user(&up32->m.fd, &up->m.fd,
- sizeof(int)))
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (get_user(p, &up->m.userptr) ||
+ put_user((compat_ulong_t)ptr_to_compat((__force void *)p),
+ &up32->m.userptr))
return -EFAULT;
- if (memory == V4L2_MEMORY_USERPTR)
- if (copy_in_user(&up32->m.userptr, &up->m.userptr,
- sizeof(compat_long_t)))
+ break;
+ case V4L2_MEMORY_DMABUF:
+ if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd)))
return -EFAULT;
+ break;
+ }
return 0;
}
-static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size)
{
+ u32 type;
+ u32 length;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ get_user(type, &up->type) ||
+ get_user(length, &up->length))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ if (length > VIDEO_MAX_PLANES)
+ return -EINVAL;
+
+ /*
+ * We don't really care if userspace decides to kill itself
+ * by passing a very big length value
+ */
+ *size = length * sizeof(struct v4l2_plane);
+ } else {
+ *size = 0;
+ }
+ return 0;
+}
+
+static int get_v4l2_buffer32(struct v4l2_buffer __user *kp,
+ struct v4l2_buffer32 __user *up,
+ void __user *aux_buf, u32 aux_space)
+{
+ u32 type;
+ u32 length;
+ enum v4l2_memory memory;
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
compat_caddr_t p;
- int num_planes;
int ret;
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
- get_user(kp->index, &up->index) ||
- get_user(kp->type, &up->type) ||
- get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory) ||
- get_user(kp->length, &up->length))
- return -EFAULT;
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ assign_in_user(&kp->index, &up->index) ||
+ get_user(type, &up->type) ||
+ put_user(type, &kp->type) ||
+ assign_in_user(&kp->flags, &up->flags) ||
+ get_user(memory, &up->memory) ||
+ put_user(memory, &kp->memory) ||
+ get_user(length, &up->length) ||
+ put_user(length, &kp->length))
+ return -EFAULT;
- if (V4L2_TYPE_IS_OUTPUT(kp->type))
- if (get_user(kp->bytesused, &up->bytesused) ||
- get_user(kp->field, &up->field) ||
- get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
- get_user(kp->timestamp.tv_usec,
- &up->timestamp.tv_usec))
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ if (assign_in_user(&kp->bytesused, &up->bytesused) ||
+ assign_in_user(&kp->field, &up->field) ||
+ assign_in_user(&kp->timestamp.tv_sec,
+ &up->timestamp.tv_sec) ||
+ assign_in_user(&kp->timestamp.tv_usec,
+ &up->timestamp.tv_usec))
return -EFAULT;
- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- num_planes = kp->length;
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ u32 num_planes = length;
+
if (num_planes == 0) {
- kp->m.planes = NULL;
- /* num_planes == 0 is legal, e.g. when userspace doesn't
- * need planes array on DQBUF*/
- return 0;
+ /*
+ * num_planes == 0 is legal, e.g. when userspace doesn't
+ * need planes array on DQBUF
+ */
+ return put_user(NULL, &kp->m.planes);
}
+ if (num_planes > VIDEO_MAX_PLANES)
+ return -EINVAL;
if (get_user(p, &up->m.planes))
return -EFAULT;
uplane32 = compat_ptr(p);
if (!access_ok(VERIFY_READ, uplane32,
- num_planes * sizeof(struct v4l2_plane32)))
+ num_planes * sizeof(*uplane32)))
+ return -EFAULT;
+
+ /*
+ * We don't really care if userspace decides to kill itself
+ * by passing a very big num_planes value
+ */
+ if (aux_space < num_planes * sizeof(*uplane))
return -EFAULT;
- /* We don't really care if userspace decides to kill itself
- * by passing a very big num_planes value */
- uplane = compat_alloc_user_space(num_planes *
- sizeof(struct v4l2_plane));
- kp->m.planes = (__force struct v4l2_plane *)uplane;
+ uplane = aux_buf;
+ if (put_user((__force struct v4l2_plane *)uplane,
+ &kp->m.planes))
+ return -EFAULT;
- while (--num_planes >= 0) {
- ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
+ while (num_planes--) {
+ ret = get_v4l2_plane32(uplane, uplane32, memory);
if (ret)
return ret;
- ++uplane;
- ++uplane32;
+ uplane++;
+ uplane32++;
}
} else {
- switch (kp->memory) {
+ switch (memory) {
case V4L2_MEMORY_MMAP:
- if (get_user(kp->m.offset, &up->m.offset))
+ case V4L2_MEMORY_OVERLAY:
+ if (assign_in_user(&kp->m.offset, &up->m.offset))
return -EFAULT;
break;
- case V4L2_MEMORY_USERPTR:
- {
- compat_long_t tmp;
+ case V4L2_MEMORY_USERPTR: {
+ compat_ulong_t userptr;
- if (get_user(tmp, &up->m.userptr))
- return -EFAULT;
-
- kp->m.userptr = (unsigned long)compat_ptr(tmp);
- }
- break;
- case V4L2_MEMORY_OVERLAY:
- if (get_user(kp->m.offset, &up->m.offset))
+ if (get_user(userptr, &up->m.userptr) ||
+ put_user((unsigned long)compat_ptr(userptr),
+ &kp->m.userptr))
return -EFAULT;
break;
+ }
case V4L2_MEMORY_DMABUF:
- if (get_user(kp->m.fd, &up->m.fd))
+ if (assign_in_user(&kp->m.fd, &up->m.fd))
return -EFAULT;
break;
}
@@ -498,65 +579,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
return 0;
}
-static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int put_v4l2_buffer32(struct v4l2_buffer __user *kp,
+ struct v4l2_buffer32 __user *up)
{
+ u32 type;
+ u32 length;
+ enum v4l2_memory memory;
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
compat_caddr_t p;
- int num_planes;
int ret;
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
- put_user(kp->index, &up->index) ||
- put_user(kp->type, &up->type) ||
- put_user(kp->flags, &up->flags) ||
- put_user(kp->memory, &up->memory))
- return -EFAULT;
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
+ assign_in_user(&up->index, &kp->index) ||
+ get_user(type, &kp->type) ||
+ put_user(type, &up->type) ||
+ assign_in_user(&up->flags, &kp->flags) ||
+ get_user(memory, &kp->memory) ||
+ put_user(memory, &up->memory))
+ return -EFAULT;
- if (put_user(kp->bytesused, &up->bytesused) ||
- put_user(kp->field, &up->field) ||
- put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
- put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
- copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
- put_user(kp->sequence, &up->sequence) ||
- put_user(kp->reserved2, &up->reserved2) ||
- put_user(kp->reserved, &up->reserved) ||
- put_user(kp->length, &up->length))
- return -EFAULT;
+ if (assign_in_user(&up->bytesused, &kp->bytesused) ||
+ assign_in_user(&up->field, &kp->field) ||
+ assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
+ assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) ||
+ copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) ||
+ assign_in_user(&up->sequence, &kp->sequence) ||
+ assign_in_user(&up->reserved2, &kp->reserved2) ||
+ assign_in_user(&up->reserved, &kp->reserved) ||
+ get_user(length, &kp->length) ||
+ put_user(length, &up->length))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ u32 num_planes = length;
- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- num_planes = kp->length;
if (num_planes == 0)
return 0;
- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
+ if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes)))
+ return -EFAULT;
if (get_user(p, &up->m.planes))
return -EFAULT;
uplane32 = compat_ptr(p);
- while (--num_planes >= 0) {
- ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
+ while (num_planes--) {
+ ret = put_v4l2_plane32(uplane, uplane32, memory);
if (ret)
return ret;
++uplane;
++uplane32;
}
} else {
- switch (kp->memory) {
+ switch (memory) {
case V4L2_MEMORY_MMAP:
- if (put_user(kp->m.offset, &up->m.offset))
+ case V4L2_MEMORY_OVERLAY:
+ if (assign_in_user(&up->m.offset, &kp->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
- if (put_user(kp->m.userptr, &up->m.userptr))
- return -EFAULT;
- break;
- case V4L2_MEMORY_OVERLAY:
- if (put_user(kp->m.offset, &up->m.offset))
+ if (assign_in_user(&up->m.userptr, &kp->m.userptr))
return -EFAULT;
break;
case V4L2_MEMORY_DMABUF:
- if (put_user(kp->m.fd, &up->m.fd))
+ if (assign_in_user(&up->m.fd, &kp->m.fd))
return -EFAULT;
break;
}
@@ -568,7 +654,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
struct v4l2_framebuffer32 {
__u32 capability;
__u32 flags;
- compat_caddr_t base;
+ compat_caddr_t base;
struct {
__u32 width;
__u32 height;
@@ -581,30 +667,33 @@ struct v4l2_framebuffer32 {
} fmt;
};
-static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
+ struct v4l2_framebuffer32 __user *up)
{
- u32 tmp;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
- get_user(tmp, &up->base) ||
- get_user(kp->capability, &up->capability) ||
- get_user(kp->flags, &up->flags) ||
- copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
- return -EFAULT;
- kp->base = (__force void *)compat_ptr(tmp);
+ compat_caddr_t tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ get_user(tmp, &up->base) ||
+ put_user((__force void *)compat_ptr(tmp), &kp->base) ||
+ assign_in_user(&kp->capability, &up->capability) ||
+ assign_in_user(&kp->flags, &up->flags) ||
+ copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt)))
+ return -EFAULT;
return 0;
}
-static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
+ struct v4l2_framebuffer32 __user *up)
{
- u32 tmp = (u32)((unsigned long)kp->base);
-
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
- put_user(tmp, &up->base) ||
- put_user(kp->capability, &up->capability) ||
- put_user(kp->flags, &up->flags) ||
- copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
- return -EFAULT;
+ void *base;
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
+ get_user(base, &kp->base) ||
+ put_user(ptr_to_compat(base), &up->base) ||
+ assign_in_user(&up->capability, &kp->capability) ||
+ assign_in_user(&up->flags, &kp->flags) ||
+ copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt)))
+ return -EFAULT;
return 0;
}
@@ -616,21 +705,26 @@ struct v4l2_input32 {
__u32 tuner; /* Associated tuner */
compat_u64 std;
__u32 status;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
-/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
- Otherwise it is identical to the 32-bit version. */
-static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+/*
+ * The 64-bit v4l2_input struct has extra padding at the end of the struct.
+ * Otherwise it is identical to the 32-bit version.
+ */
+static inline int get_v4l2_input32(struct v4l2_input __user *kp,
+ struct v4l2_input32 __user *up)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
+ if (copy_in_user(kp, up, sizeof(*up)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int put_v4l2_input32(struct v4l2_input __user *kp,
+ struct v4l2_input32 __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
+ if (copy_in_user(up, kp, sizeof(*up)))
return -EFAULT;
return 0;
}
@@ -654,58 +748,95 @@ struct v4l2_ext_control32 {
};
} __attribute__ ((packed));
-/* The following function really belong in v4l2-common, but that causes
- a circular dependency between modules. We need to think about this, but
- for now this will do. */
-
-/* Return non-zero if this control is a pointer type. Currently only
- type STRING is a pointer type. */
-static inline int ctrl_is_pointer(u32 id)
+/* Return true if this control is a pointer type. */
+static inline bool ctrl_is_pointer(struct file *file, u32 id)
{
- switch (id) {
- case V4L2_CID_RDS_TX_PS_NAME:
- case V4L2_CID_RDS_TX_RADIO_TEXT:
- return 1;
- default:
- return 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_fh *fh = NULL;
+ struct v4l2_ctrl_handler *hdl = NULL;
+ struct v4l2_query_ext_ctrl qec = { id };
+ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
+ fh = file->private_data;
+
+ if (fh && fh->ctrl_handler)
+ hdl = fh->ctrl_handler;
+ else if (vdev->ctrl_handler)
+ hdl = vdev->ctrl_handler;
+
+ if (hdl) {
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
+
+ return ctrl && ctrl->is_ptr;
}
+
+ if (!ops || !ops->vidioc_query_ext_ctrl)
+ return false;
+
+ return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
+ (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
}
-static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up,
+ u32 *size)
+{
+ u32 count;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ get_user(count, &up->count))
+ return -EFAULT;
+ if (count > V4L2_CID_MAX_CTRLS)
+ return -EINVAL;
+ *size = count * sizeof(struct v4l2_ext_control);
+ return 0;
+}
+
+static int get_v4l2_ext_controls32(struct file *file,
+ struct v4l2_ext_controls __user *kp,
+ struct v4l2_ext_controls32 __user *up,
+ void __user *aux_buf, u32 aux_space)
{
struct v4l2_ext_control32 __user *ucontrols;
struct v4l2_ext_control __user *kcontrols;
- int n;
+ u32 count;
+ u32 n;
compat_caddr_t p;
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
- get_user(kp->ctrl_class, &up->ctrl_class) ||
- get_user(kp->count, &up->count) ||
- get_user(kp->error_idx, &up->error_idx) ||
- copy_from_user(kp->reserved, up->reserved,
- sizeof(kp->reserved)))
- return -EFAULT;
- n = kp->count;
- if (n == 0) {
- kp->controls = NULL;
- return 0;
- }
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ assign_in_user(&kp->ctrl_class, &up->ctrl_class) ||
+ get_user(count, &up->count) ||
+ put_user(count, &kp->count) ||
+ assign_in_user(&kp->error_idx, &up->error_idx) ||
+ copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+ return -EFAULT;
+
+ if (count == 0)
+ return put_user(NULL, &kp->controls);
+ if (count > V4L2_CID_MAX_CTRLS)
+ return -EINVAL;
if (get_user(p, &up->controls))
return -EFAULT;
ucontrols = compat_ptr(p);
- if (!access_ok(VERIFY_READ, ucontrols,
- n * sizeof(struct v4l2_ext_control32)))
+ if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols)))
+ return -EFAULT;
+ if (aux_space < count * sizeof(*kcontrols))
+ return -EFAULT;
+ kcontrols = aux_buf;
+ if (put_user((__force struct v4l2_ext_control *)kcontrols,
+ &kp->controls))
return -EFAULT;
- kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
- while (--n >= 0) {
+
+ for (n = 0; n < count; n++) {
u32 id;
if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
return -EFAULT;
+
if (get_user(id, &kcontrols->id))
return -EFAULT;
- if (ctrl_is_pointer(id)) {
+
+ if (ctrl_is_pointer(file, id)) {
void __user *s;
if (get_user(p, &ucontrols->string))
@@ -720,43 +851,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
return 0;
}
-static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int put_v4l2_ext_controls32(struct file *file,
+ struct v4l2_ext_controls __user *kp,
+ struct v4l2_ext_controls32 __user *up)
{
struct v4l2_ext_control32 __user *ucontrols;
- struct v4l2_ext_control __user *kcontrols =
- (__force struct v4l2_ext_control __user *)kp->controls;
- int n = kp->count;
+ struct v4l2_ext_control __user *kcontrols;
+ u32 count;
+ u32 n;
compat_caddr_t p;
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
- put_user(kp->ctrl_class, &up->ctrl_class) ||
- put_user(kp->count, &up->count) ||
- put_user(kp->error_idx, &up->error_idx) ||
- copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
- return -EFAULT;
- if (!kp->count)
- return 0;
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
+ assign_in_user(&up->ctrl_class, &kp->ctrl_class) ||
+ get_user(count, &kp->count) ||
+ put_user(count, &up->count) ||
+ assign_in_user(&up->error_idx, &kp->error_idx) ||
+ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) ||
+ get_user(kcontrols, &kp->controls))
+ return -EFAULT;
+ if (!count)
+ return 0;
if (get_user(p, &up->controls))
return -EFAULT;
ucontrols = compat_ptr(p);
- if (!access_ok(VERIFY_WRITE, ucontrols,
- n * sizeof(struct v4l2_ext_control32)))
+ if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols)))
return -EFAULT;
- while (--n >= 0) {
- unsigned size = sizeof(*ucontrols);
+ for (n = 0; n < count; n++) {
+ unsigned int size = sizeof(*ucontrols);
u32 id;
- if (get_user(id, &kcontrols->id))
+ if (get_user(id, &kcontrols->id) ||
+ put_user(id, &ucontrols->id) ||
+ assign_in_user(&ucontrols->size, &kcontrols->size) ||
+ copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2,
+ sizeof(ucontrols->reserved2)))
return -EFAULT;
- /* Do not modify the pointer when copying a pointer control.
- The contents of the pointer was changed, not the pointer
- itself. */
- if (ctrl_is_pointer(id))
+
+ /*
+ * Do not modify the pointer when copying a pointer control.
+ * The contents of the pointer was changed, not the pointer
+ * itself.
+ */
+ if (ctrl_is_pointer(file, id))
size -= sizeof(ucontrols->value64);
+
if (copy_in_user(ucontrols, kcontrols, size))
return -EFAULT;
+
ucontrols++;
kcontrols++;
}
@@ -781,18 +924,19 @@ struct v4l2_event32 {
__u32 reserved[8];
};
-static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
+static int put_v4l2_event32(struct v4l2_event __user *kp,
+ struct v4l2_event32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
- put_user(kp->type, &up->type) ||
- copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
- put_user(kp->pending, &up->pending) ||
- put_user(kp->sequence, &up->sequence) ||
- put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
- put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
- put_user(kp->id, &up->id) ||
- copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
- return -EFAULT;
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
+ assign_in_user(&up->type, &kp->type) ||
+ copy_in_user(&up->u, &kp->u, sizeof(kp->u)) ||
+ assign_in_user(&up->pending, &kp->pending) ||
+ assign_in_user(&up->sequence, &kp->sequence) ||
+ assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
+ assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) ||
+ assign_in_user(&up->id, &kp->id) ||
+ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+ return -EFAULT;
return 0;
}
@@ -804,32 +948,35 @@ struct v4l2_edid32 {
compat_caddr_t edid;
};
-static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+static int get_v4l2_edid32(struct v4l2_edid __user *kp,
+ struct v4l2_edid32 __user *up)
{
- u32 tmp;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
- get_user(kp->pad, &up->pad) ||
- get_user(kp->start_block, &up->start_block) ||
- get_user(kp->blocks, &up->blocks) ||
- get_user(tmp, &up->edid) ||
- copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
- return -EFAULT;
- kp->edid = (__force u8 *)compat_ptr(tmp);
+ compat_uptr_t tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
+ assign_in_user(&kp->pad, &up->pad) ||
+ assign_in_user(&kp->start_block, &up->start_block) ||
+ assign_in_user(&kp->blocks, &up->blocks) ||
+ get_user(tmp, &up->edid) ||
+ put_user(compat_ptr(tmp), &kp->edid) ||
+ copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+ return -EFAULT;
return 0;
}
-static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+static int put_v4l2_edid32(struct v4l2_edid __user *kp,
+ struct v4l2_edid32 __user *up)
{
- u32 tmp = (u32)((unsigned long)kp->edid);
-
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
- put_user(kp->pad, &up->pad) ||
- put_user(kp->start_block, &up->start_block) ||
- put_user(kp->blocks, &up->blocks) ||
- put_user(tmp, &up->edid) ||
- copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
- return -EFAULT;
+ void *edid;
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
+ assign_in_user(&up->pad, &kp->pad) ||
+ assign_in_user(&up->start_block, &kp->start_block) ||
+ assign_in_user(&up->blocks, &kp->blocks) ||
+ get_user(edid, &kp->edid) ||
+ put_user(ptr_to_compat(edid), &up->edid) ||
+ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+ return -EFAULT;
return 0;
}
@@ -845,7 +992,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
-#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
+#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
@@ -861,26 +1008,26 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
#define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32)
#define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32)
+static int alloc_userspace(unsigned int size, u32 aux_space,
+ void __user **up_native)
+{
+ *up_native = compat_alloc_user_space(size + aux_space);
+ if (!*up_native)
+ return -ENOMEM;
+ if (clear_user(*up_native, size))
+ return -EFAULT;
+ return 0;
+}
+
static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- union {
- struct v4l2_format v2f;
- struct v4l2_buffer v2b;
- struct v4l2_framebuffer v2fb;
- struct v4l2_input v2i;
- struct v4l2_standard v2s;
- struct v4l2_ext_controls v2ecs;
- struct v4l2_event v2ev;
- struct v4l2_create_buffers v2crt;
- struct v4l2_edid v2edid;
- unsigned long vx;
- int vi;
- } karg;
void __user *up = compat_ptr(arg);
+ void __user *up_native = NULL;
+ void __user *aux_buf;
+ u32 aux_space;
int compatible_arg = 1;
long err = 0;
- memset(&karg, 0, sizeof(karg));
/* First, convert the command. */
switch (cmd) {
case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
@@ -916,30 +1063,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_STREAMOFF:
case VIDIOC_S_INPUT:
case VIDIOC_S_OUTPUT:
- err = get_user(karg.vi, (s32 __user *)up);
+ err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
+ if (!err && assign_in_user((unsigned int __user *)up_native,
+ (compat_uint_t __user *)up))
+ err = -EFAULT;
compatible_arg = 0;
break;
case VIDIOC_G_INPUT:
case VIDIOC_G_OUTPUT:
+ err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
compatible_arg = 0;
break;
case VIDIOC_G_EDID:
case VIDIOC_S_EDID:
- err = get_v4l2_edid32(&karg.v2edid, up);
+ err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native);
+ if (!err)
+ err = get_v4l2_edid32(up_native, up);
compatible_arg = 0;
break;
case VIDIOC_G_FMT:
case VIDIOC_S_FMT:
case VIDIOC_TRY_FMT:
- err = get_v4l2_format32(&karg.v2f, up);
+ err = bufsize_v4l2_format(up, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_format),
+ aux_space, &up_native);
+ if (!err) {
+ aux_buf = up_native + sizeof(struct v4l2_format);
+ err = get_v4l2_format32(up_native, up,
+ aux_buf, aux_space);
+ }
compatible_arg = 0;
break;
case VIDIOC_CREATE_BUFS:
- err = get_v4l2_create32(&karg.v2crt, up);
+ err = bufsize_v4l2_create(up, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_create_buffers),
+ aux_space, &up_native);
+ if (!err) {
+ aux_buf = up_native + sizeof(struct v4l2_create_buffers);
+ err = get_v4l2_create32(up_native, up,
+ aux_buf, aux_space);
+ }
compatible_arg = 0;
break;
@@ -947,36 +1116,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF:
- err = get_v4l2_buffer32(&karg.v2b, up);
+ err = bufsize_v4l2_buffer(up, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_buffer),
+ aux_space, &up_native);
+ if (!err) {
+ aux_buf = up_native + sizeof(struct v4l2_buffer);
+ err = get_v4l2_buffer32(up_native, up,
+ aux_buf, aux_space);
+ }
compatible_arg = 0;
break;
case VIDIOC_S_FBUF:
- err = get_v4l2_framebuffer32(&karg.v2fb, up);
+ err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
+ &up_native);
+ if (!err)
+ err = get_v4l2_framebuffer32(up_native, up);
compatible_arg = 0;
break;
case VIDIOC_G_FBUF:
+ err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
+ &up_native);
compatible_arg = 0;
break;
case VIDIOC_ENUMSTD:
- err = get_v4l2_standard32(&karg.v2s, up);
+ err = alloc_userspace(sizeof(struct v4l2_standard), 0,
+ &up_native);
+ if (!err)
+ err = get_v4l2_standard32(up_native, up);
compatible_arg = 0;
break;
case VIDIOC_ENUMINPUT:
- err = get_v4l2_input32(&karg.v2i, up);
+ err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native);
+ if (!err)
+ err = get_v4l2_input32(up_native, up);
compatible_arg = 0;
break;
case VIDIOC_G_EXT_CTRLS:
case VIDIOC_S_EXT_CTRLS:
case VIDIOC_TRY_EXT_CTRLS:
- err = get_v4l2_ext_controls32(&karg.v2ecs, up);
+ err = bufsize_v4l2_ext_controls(up, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_ext_controls),
+ aux_space, &up_native);
+ if (!err) {
+ aux_buf = up_native + sizeof(struct v4l2_ext_controls);
+ err = get_v4l2_ext_controls32(file, up_native, up,
+ aux_buf, aux_space);
+ }
compatible_arg = 0;
break;
case VIDIOC_DQEVENT:
+ err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native);
compatible_arg = 0;
break;
}
@@ -985,22 +1181,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
if (compatible_arg)
err = native_ioctl(file, cmd, (unsigned long)up);
- else {
- mm_segment_t old_fs = get_fs();
+ else
+ err = native_ioctl(file, cmd, (unsigned long)up_native);
- set_fs(KERNEL_DS);
- err = native_ioctl(file, cmd, (unsigned long)&karg);
- set_fs(old_fs);
- }
+ if (err == -ENOTTY)
+ return err;
- /* Special case: even after an error we need to put the
- results back for these ioctls since the error_idx will
- contain information on which control failed. */
+ /*
+ * Special case: even after an error we need to put the
+ * results back for these ioctls since the error_idx will
+ * contain information on which control failed.
+ */
switch (cmd) {
case VIDIOC_G_EXT_CTRLS:
case VIDIOC_S_EXT_CTRLS:
case VIDIOC_TRY_EXT_CTRLS:
- if (put_v4l2_ext_controls32(&karg.v2ecs, up))
+ if (put_v4l2_ext_controls32(file, up_native, up))
+ err = -EFAULT;
+ break;
+ case VIDIOC_S_EDID:
+ if (put_v4l2_edid32(up_native, up))
err = -EFAULT;
break;
}
@@ -1012,44 +1212,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_S_OUTPUT:
case VIDIOC_G_INPUT:
case VIDIOC_G_OUTPUT:
- err = put_user(((s32)karg.vi), (s32 __user *)up);
+ if (assign_in_user((compat_uint_t __user *)up,
+ ((unsigned int __user *)up_native)))
+ err = -EFAULT;
break;
case VIDIOC_G_FBUF:
- err = put_v4l2_framebuffer32(&karg.v2fb, up);
+ err = put_v4l2_framebuffer32(up_native, up);
break;
case VIDIOC_DQEVENT:
- err = put_v4l2_event32(&karg.v2ev, up);
+ err = put_v4l2_event32(up_native, up);
break;
case VIDIOC_G_EDID:
- case VIDIOC_S_EDID:
- err = put_v4l2_edid32(&karg.v2edid, up);
+ err = put_v4l2_edid32(up_native, up);
break;
case VIDIOC_G_FMT:
case VIDIOC_S_FMT:
case VIDIOC_TRY_FMT:
- err = put_v4l2_format32(&karg.v2f, up);
+ err = put_v4l2_format32(up_native, up);
break;
case VIDIOC_CREATE_BUFS:
- err = put_v4l2_create32(&karg.v2crt, up);
+ err = put_v4l2_create32(up_native, up);
break;
+ case VIDIOC_PREPARE_BUF:
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF:
- err = put_v4l2_buffer32(&karg.v2b, up);
+ err = put_v4l2_buffer32(up_native, up);
break;
case VIDIOC_ENUMSTD:
- err = put_v4l2_standard32(&karg.v2s, up);
+ err = put_v4l2_standard32(up_native, up);
break;
case VIDIOC_ENUMINPUT:
- err = put_v4l2_input32(&karg.v2i, up);
+ err = put_v4l2_input32(up_native, up);
break;
}
return err;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index d9e04bc8d0f1..79829f56a816 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2871,8 +2871,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
/* Handles IOCTL */
err = func(file, cmd, parg);
- if (err == -ENOIOCTLCMD)
+ if (err == -ENOTTY || err == -ENOIOCTLCMD) {
err = -ENOTTY;
+ goto out;
+ }
+
if (err == 0) {
if (cmd == VIDIOC_DQBUF)
trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 3dc9ed2e0774..bb1e19f7ed5a 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -205,6 +205,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
struct vb2_buffer *vb;
int ret;
+	/* Ensure that q->num_buffers+num_buffers does not exceed VB2_MAX_FRAME */
+ num_buffers = min_t(unsigned int, num_buffers,
+ VB2_MAX_FRAME - q->num_buffers);
+
for (buffer = 0; buffer < num_buffers; ++buffer) {
/* Allocate videobuf buffer structures */
vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 6c441be8f893..b44d0e755675 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -133,23 +133,6 @@ static int __set_timestamp(struct vb2_buffer *vb, const void *pb)
return 0;
};
-static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
-{
- static bool check_once;
-
- if (check_once)
- return;
-
- check_once = true;
- WARN_ON(1);
-
- pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
- if (vb->vb2_queue->allow_zero_bytesused)
- pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
- else
- pr_warn("use the actual size instead.\n");
-}
-
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
const char *opname)
{
@@ -357,9 +340,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
struct vb2_plane *pdst = &planes[plane];
struct v4l2_plane *psrc = &b->m.planes[plane];
- if (psrc->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
if (vb->vb2_queue->allow_zero_bytesused)
pdst->bytesused = psrc->bytesused;
else
@@ -394,9 +374,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
}
if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- if (b->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
if (vb->vb2_queue->allow_zero_bytesused)
planes[0].bytesused = b->bytesused;
else
@@ -593,6 +570,12 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
b->flags & V4L2_BUF_FLAG_LAST)
q->last_buffer_dequeued = true;
+ /*
+	 * After VIDIOC_DQBUF is called, the V4L2_BUF_FLAG_DONE flag must
+	 * be cleared.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+
return ret;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5dcc0313c38a..207370d68c17 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6848,6 +6848,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
*size = y;
}
+#ifdef CONFIG_PROC_FS
static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
{
char expVer[32];
@@ -6879,6 +6880,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
seq_putc(m, '\n');
}
+#endif
/**
* mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ebccfa8072a..cb790b68920f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
+ .no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 8f8bacb67a15..a6b5259ffbdd 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -430,6 +430,20 @@ static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
+ struct device_node *np = palmas_dev->dev->of_node;
+
+ if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
+ addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
+ PALMAS_PRIMARY_SECONDARY_PAD2);
+ slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+
+ ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
+ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+ if (ret)
+ dev_err(palmas_dev->dev,
+ "Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
+ ret);
+ }
if (!palmas_dev)
return;
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index cc91f7b3d90c..eb29113e0bac 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components,
for (i = 0; i < components; i++) {
edev->component[i].number = -1;
edev->component[i].slot = -1;
- edev->component[i].power_status = 1;
+ edev->component[i].power_status = -1;
}
mutex_lock(&container_list_lock);
@@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev,
if (edev->cb->get_power_status)
edev->cb->get_power_status(edev, ecomp);
+
+ /* If still uninitialized, the callback failed or does not exist. */
+ if (ecomp->power_status == -1)
+ return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
+
return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 4ef189a7a2fb..8c04e342e30a 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -571,7 +571,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
break;
default:
- dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
rets = -ENOIOCTLCMD;
}
diff --git a/drivers/misc/profiler.c b/drivers/misc/profiler.c
index a2887fcefbab..92e6a6651b68 100644
--- a/drivers/misc/profiler.c
+++ b/drivers/misc/profiler.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -169,6 +169,7 @@ static int bw_profiling_get(void __user *argp, struct tz_bw_svc_buf *bwbuf)
const int numberofregs = 3;
struct profiler_bw_cntrs_req cnt_buf;
+ memset(&cnt_buf, 0, sizeof(cnt_buf));
bwgetreq = (struct tz_bw_svc_get_req *) &bwbuf->bwreq;
/* Allocate memory for get buffer */
buf = kzalloc(PAGE_ALIGN(numberofregs * sizeof(uint32_t)), GFP_KERNEL);
diff --git a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
index 42b45ec7d9d9..92faa1b899c9 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -58,7 +58,7 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
audio->pcm_cfg.sample_rate,
audio->pcm_cfg.channel_count);
if (rc < 0) {
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 98657d0a6822..ce47780e5936 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1,6 +1,6 @@
/*Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1845,7 +1845,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
return ret;
}
-static int __qseecom_process_blocked_on_listener_legacy(
+static int __qseecom_process_reentrancy_blocked_on_listener(
struct qseecom_command_scm_resp *resp,
struct qseecom_registered_app_list *ptr_app,
struct qseecom_dev_handle *data)
@@ -1854,8 +1854,11 @@ static int __qseecom_process_blocked_on_listener_legacy(
int ret = 0;
struct qseecom_continue_blocked_request_ireq ireq;
struct qseecom_command_scm_resp continue_resp;
- bool found_app = false;
+ unsigned int session_id;
+ sigset_t new_sigset;
+ sigset_t old_sigset;
unsigned long flags;
+ bool found_app = false;
if (!resp || !data) {
pr_err("invalid resp or data pointer\n");
@@ -1886,137 +1889,81 @@ static int __qseecom_process_blocked_on_listener_legacy(
}
}
- list_ptr = __qseecom_find_svc(resp->data);
- if (!list_ptr) {
- pr_err("Invalid listener ID\n");
- ret = -ENODATA;
- goto exit;
- }
- pr_debug("lsntr %d in_use = %d\n",
- resp->data, list_ptr->listener_in_use);
- ptr_app->blocked_on_listener_id = resp->data;
-
- /* sleep until listener is available */
do {
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
- list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, app_id %d\n",
- resp->data, ptr_app->app_id);
- ret = -ERESTARTSYS;
+ session_id = resp->resp_type;
+ list_ptr = __qseecom_find_svc(resp->data);
+ if (!list_ptr) {
+ pr_err("Invalid listener ID %d\n", resp->data);
+ ret = -ENODATA;
goto exit;
}
- mutex_lock(&app_access_lock);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
- } while (list_ptr->listener_in_use);
-
- ptr_app->blocked_on_listener_id = 0;
- /* notify the blocked app that listener is available */
- pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
- resp->data, data->client.app_id,
- data->client.app_name);
- ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
- ireq.app_or_session_id = data->client.app_id;
- ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- &ireq, sizeof(ireq),
- &continue_resp, sizeof(continue_resp));
- if (ret) {
- pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
- data->client.app_id,
- data->client.app_name, ret);
- goto exit;
- }
- /*
- * After TZ app is unblocked, then continue to next case
- * for incomplete request processing
- */
- resp->result = QSEOS_RESULT_INCOMPLETE;
-exit:
- return ret;
-}
+ ptr_app->blocked_on_listener_id = resp->data;
-static int __qseecom_process_blocked_on_listener_smcinvoke(
- struct qseecom_command_scm_resp *resp, uint32_t app_id)
-{
- struct qseecom_registered_listener_list *list_ptr;
- int ret = 0;
- struct qseecom_continue_blocked_request_ireq ireq;
- struct qseecom_command_scm_resp continue_resp;
- unsigned int session_id;
+ pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
+ resp->data, list_ptr->listener_in_use,
+ session_id, data->client.app_id);
- if (!resp) {
- pr_err("invalid resp pointer\n");
- ret = -EINVAL;
- goto exit;
- }
- session_id = resp->resp_type;
- list_ptr = __qseecom_find_svc(resp->data);
- if (!list_ptr) {
- pr_err("Invalid listener ID\n");
- ret = -ENODATA;
- goto exit;
- }
- pr_debug("lsntr %d in_use = %d\n",
- resp->data, list_ptr->listener_in_use);
- /* sleep until listener is available */
- do {
- qseecom.app_block_ref_cnt++;
- mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
- list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, session_id %d\n",
- resp->data, session_id);
- ret = -ERESTARTSYS;
- goto exit;
- }
- mutex_lock(&app_access_lock);
- qseecom.app_block_ref_cnt--;
- } while (list_ptr->listener_in_use);
+ /* sleep until listener is available */
+ sigfillset(&new_sigset);
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+ do {
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ mutex_unlock(&app_access_lock);
+ wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use);
+ mutex_lock(&app_access_lock);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+ } while (list_ptr->listener_in_use);
+
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+
+ ptr_app->blocked_on_listener_id = 0;
+ pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
+ resp->data, session_id, data->client.app_id);
+
+ /* notify TZ that listener is available */
+ ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+
+ if (qseecom.smcinvoke_support)
+ ireq.app_or_session_id = session_id;
+ else
+ ireq.app_or_session_id = data->client.app_id;
- /* notify TZ that listener is available */
- pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
- resp->data, session_id);
- ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
- ireq.app_or_session_id = session_id;
- ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- &ireq, sizeof(ireq),
- &continue_resp, sizeof(continue_resp));
- if (ret) {
- /* retry with legacy cmd */
- qseecom.smcinvoke_support = false;
- ireq.app_or_session_id = app_id;
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- &ireq, sizeof(ireq),
- &continue_resp, sizeof(continue_resp));
- qseecom.smcinvoke_support = true;
- if (ret) {
- pr_err("cont block req for app %d or session %d fail\n",
- app_id, session_id);
- goto exit;
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ if (ret && qseecom.smcinvoke_support) {
+ /* retry with legacy cmd */
+ qseecom.smcinvoke_support = false;
+ ireq.app_or_session_id = data->client.app_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ qseecom.smcinvoke_support = true;
+ if (ret) {
+ pr_err("unblock app %d or session %d fail\n",
+ data->client.app_id, session_id);
+ goto exit;
+ }
}
+ resp->result = continue_resp.result;
+ resp->resp_type = continue_resp.resp_type;
+ resp->data = continue_resp.data;
+ pr_debug("unblock resp = %d\n", resp->result);
+ } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
+
+ if (resp->result != QSEOS_RESULT_INCOMPLETE) {
+ pr_err("Unexpected unblock resp %d\n", resp->result);
+ ret = -EINVAL;
}
- resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
return ret;
}
-static int __qseecom_process_reentrancy_blocked_on_listener(
- struct qseecom_command_scm_resp *resp,
- struct qseecom_registered_app_list *ptr_app,
- struct qseecom_dev_handle *data)
-{
- if (!qseecom.smcinvoke_support)
- return __qseecom_process_blocked_on_listener_legacy(
- resp, ptr_app, data);
- else
- return __qseecom_process_blocked_on_listener_smcinvoke(
- resp, data->client.app_id);
-}
static int __qseecom_reentrancy_process_incomplete_cmd(
struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 0e6ab4e7c686..c52c8ccc90b7 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -14,6 +14,7 @@
*/
#include <linux/atomic.h>
+#include <linux/cpufreq_times.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/init.h>
@@ -344,13 +345,13 @@ static int uid_cputime_show(struct seq_file *m, void *v)
uid_entry->active_utime = 0;
}
- read_lock(&tasklist_lock);
+ rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
if (!uid_entry || uid_entry->uid != uid)
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
rt_mutex_unlock(&uid_lock);
pr_err("%s: failed to find the uid_entry for uid %d\n",
__func__, uid);
@@ -360,7 +361,7 @@ static int uid_cputime_show(struct seq_file *m, void *v)
uid_entry->active_utime += utime;
uid_entry->active_stime += stime;
} while_each_thread(temp, task);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
cputime_t total_utime = uid_entry->utime +
@@ -421,6 +422,10 @@ static ssize_t uid_remove_write(struct file *file,
kstrtol(end_uid, 10, &uid_end) != 0) {
return -EINVAL;
}
+
+ /* Also remove uids from /proc/uid_time_in_state */
+ cpufreq_task_times_remove_uids(uid_start, uid_end);
+
rt_mutex_lock(&uid_lock);
for (; uid_start <= uid_end; uid_start++) {
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..cc277f7849b0 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags)
size_t pas_size;
size_t vas_size;
size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages >
(SIZE_MAX - queue_size) /
(sizeof(*queue->kernel_if->u.g.pas) +
@@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
struct vmci_queue *queue;
size_t queue_page_size;
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages > (SIZE_MAX - queue_size) /
sizeof(*queue->kernel_if->u.h.page))
return NULL;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 7d2ceda7f80e..13e0df67d3b7 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1048,6 +1048,12 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
goto idata_free;
}
+ /*
+	 * Ensure the rpmb_req_pending flag is synchronized with a lock
+	 * between the multiple entities that may issue RPMB ioctls.
+ */
+ mutex_lock(&card->host->rpmb_req_mutex);
+ atomic_set(&card->host->rpmb_req_pending, 1);
mmc_get_card(card);
if (mmc_card_doing_bkops(card)) {
@@ -1163,6 +1169,9 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
cmd_rel_host:
mmc_put_card(card);
+ atomic_set(&card->host->rpmb_req_pending, 0);
+ mutex_unlock(&card->host->rpmb_req_mutex);
+
idata_free:
for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
@@ -1292,9 +1301,26 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt(card->host, true);
+ if (err) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ mmc_put_card(card);
+ goto cmd_done;
+ }
+ }
+
for (i = 0; i < num_of_cmds && !ioc_err; i++)
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+
mmc_put_card(card);
/* copy to user if data and response */
@@ -3173,11 +3199,11 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
static void mmc_blk_cmdq_requeue_rw_rq(struct mmc_queue *mq,
struct request *req)
{
- struct mmc_card *card = mq->card;
- struct mmc_host *host = card->host;
+ struct request_queue *q = req->q;
- blk_requeue_request(req->q, req);
- mmc_put_card(host->card);
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, req);
+ spin_unlock_irq(q->queue_lock);
}
static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
@@ -3605,7 +3631,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
* or disable state so cannot receive any completion of
* other requests.
*/
- BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+ WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
/* clear pending request */
BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3639,7 +3665,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:
mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
- if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ if (!(err || cmdq_req->resp_err)) {
mmc_host_clk_release(host);
wake_up(&ctx_info->wait);
mmc_put_card(host->card);
@@ -4065,9 +4091,16 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
	 * If issuing the request fails with either EBUSY or
* EAGAIN error, re-queue the request.
* This case would occur with ICE calls.
+			 * For a request that completes successfully or errors
+			 * out, the host lock is released in the completion or
+			 * error-handling softirq context. Here the request has
+			 * neither completed nor errored out, so release the
+			 * host lock explicitly.
*/
- if (ret == -EBUSY || ret == -EAGAIN)
+ if (ret == -EBUSY || ret == -EAGAIN) {
mmc_blk_cmdq_requeue_rw_rq(mq, req);
+ mmc_put_card(host->card);
+ }
}
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index ccf22eb5bdc0..397bbd09034d 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -95,7 +95,9 @@ static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
* be any other direct command active.
* 3. cmdq state should be unhalted.
* 4. cmdq state shouldn't be in error state.
- * 5. free tag available to process the new request.
+ * 5. There is no outstanding RPMB request pending.
+ * 6. free tag available to process the new request.
+	 *    (This must be the last condition to check)
*/
wait_event(ctx->wait, kthread_should_stop()
|| (mmc_peek_request(mq) &&
@@ -106,6 +108,7 @@ static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
&& !(!host->card->part_curr && mmc_host_cq_disable(host) &&
!mmc_card_suspended(host->card))
&& !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)
+ && !atomic_read(&host->rpmb_req_pending)
&& !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked)));
}
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 548a9e8b72ae..0b527a708bd7 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -373,12 +373,13 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_ddr52(card) ? "DDR " : "",
type);
} else {
- pr_info("%s: new %s%s%s%s%s card at address %04x\n",
+ pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_hs(card) ? "high speed " : ""),
mmc_card_hs400(card) ? "HS400 " :
(mmc_card_hs200(card) ? "HS200 " : ""),
+ mmc_card_hs400es(card) ? "Enhanced strobe " : "",
mmc_card_ddr52(card) ? "DDR " : "",
uhs_bus_speed_mode, type, card->rca);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d1d045f04368..547d18c9feef 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1170,6 +1170,46 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+static int mmc_cmdq_check_retune(struct mmc_host *host)
+{
+ bool cmdq_mode;
+ int err = 0;
+
+ if (!host->need_retune || host->doing_retune || !host->card ||
+ mmc_card_hs400es(host->card) ||
+ (host->ios.clock <= MMC_HIGH_DDR_MAX_DTR))
+ return 0;
+
+ cmdq_mode = mmc_card_cmdq(host->card);
+ if (cmdq_mode) {
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: %s: failed halting queue (%d)\n",
+ mmc_hostname(host), __func__, err);
+ host->cmdq_ops->dumpstate(host);
+ goto halt_failed;
+ }
+ }
+
+ mmc_retune_hold(host);
+ err = mmc_retune(host);
+ mmc_retune_release(host);
+
+ if (cmdq_mode) {
+ if (mmc_cmdq_halt(host, false)) {
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(host), __func__);
+ host->cmdq_ops->dumpstate(host);
+ }
+ }
+
+halt_failed:
+ pr_debug("%s: %s: Retuning done err: %d\n",
+ mmc_hostname(host), __func__, err);
+
+ return err;
+}
+
static int mmc_start_cmdq_request(struct mmc_host *host,
struct mmc_request *mrq)
{
@@ -1196,6 +1236,7 @@ static int mmc_start_cmdq_request(struct mmc_host *host,
}
mmc_host_clk_hold(host);
+ mmc_cmdq_check_retune(host);
if (likely(host->cmdq_ops->request)) {
ret = host->cmdq_ops->request(host, mrq);
} else {
@@ -1558,7 +1599,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
mmc_card_removed(host->card)) {
if (cmd->error && !cmd->retries &&
cmd->opcode != MMC_SEND_STATUS &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK)
+ cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
mmc_recovery_fallback_lower_speed(host);
break;
}
@@ -2969,8 +3011,16 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
*/
mmc_host_clk_hold(host);
err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err)
- goto err_command;
+ if (err) {
+ if (err == -ETIMEDOUT) {
+ pr_debug("%s: voltage switching failed with err %d\n",
+ mmc_hostname(host), err);
+ err = -EAGAIN;
+ goto power_cycle;
+ } else {
+ goto err_command;
+ }
+ }
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
err = -EIO;
@@ -4493,6 +4543,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
if (!err)
break;
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "pre_suspend failed for non-removable host: "
+ "%d\n", err);
+ /* Avoid removing non-removable hosts */
+ break;
+ }
+
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 88699f852aa2..b3b9d78f789a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -388,7 +388,8 @@ int mmc_retune(struct mmc_host *host)
else
return 0;
- if (!host->need_retune || host->doing_retune || !host->card)
+ if (!host->need_retune || host->doing_retune || !host->card ||
+ mmc_card_hs400es(host->card))
return 0;
host->need_retune = 0;
@@ -635,6 +636,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
#endif
setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
+ mutex_init(&host->rpmb_req_mutex);
+
/*
* By default, hosts do not support SGIO or large requests.
* They have to set these according to their abilities.
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6f4f81a370d8..c8f85b31e2ac 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1275,6 +1275,8 @@ static int mmc_select_hs400(struct mmc_card *card)
if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) {
mmc_host_clk_hold(host);
err = host->ops->enhanced_strobe(host);
+ if (!err)
+ host->ios.enhanced_strobe = true;
mmc_host_clk_release(host);
} else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) &&
host->ops->execute_tuning) {
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 65d7dbe1dea4..2aa04b6bdfb3 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -335,6 +335,7 @@ static int mmc_read_switch(struct mmc_card *card)
card->sw_caps.sd3_bus_mode = status[13];
/* Driver Strengths supported by the card */
card->sw_caps.sd3_drv_type = status[9];
+ card->sw_caps.sd3_curr_limit = status[7] | status[6] << 8;
}
out:
@@ -557,14 +558,25 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
* when we set current limit to 200ma, the card will draw 200ma, and
* when we set current limit to 400/600/800ma, the card will draw its
* maximum 300ma from the host.
+ *
+ * The above is incorrect: if we try to set a current limit that is
+ * not supported by the card, the card can rightfully error out the
+ * attempt, and remain at the default current limit. This results
+ * in a 300mA card being limited to 200mA even though the host
+ * supports 800mA. Failures seen with SanDisk 8GB UHS cards with
+ * an iMX6 host. --rmk
*/
- if (max_current >= 800)
+ if (max_current >= 800 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
current_limit = SD_SET_CURRENT_LIMIT_800;
- else if (max_current >= 600)
+ else if (max_current >= 600 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
current_limit = SD_SET_CURRENT_LIMIT_600;
- else if (max_current >= 400)
+ else if (max_current >= 400 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (max_current >= 200)
+ else if (max_current >= 200 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
current_limit = SD_SET_CURRENT_LIMIT_200;
if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index fb204ee6ff89..581f5d0271f4 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -619,6 +619,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
(sizeof(struct idmac_desc_64addr) *
(i + 1))) >> 32;
/* Initialize reserved and buffer size fields to "0" */
+ p->des0 = 0;
p->des1 = 0;
p->des2 = 0;
p->des3 = 0;
@@ -640,6 +641,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
+ p->des0 = 0;
p->des1 = 0;
}
@@ -2807,8 +2809,8 @@ static bool dw_mci_reset(struct dw_mci *host)
}
if (host->use_dma == TRANS_MODE_IDMAC)
- /* It is also recommended that we reset and reprogram idmac */
- dw_mci_idmac_reset(host);
+ /* It is also required that we reinit idmac */
+ dw_mci_idmac_init(host);
ret = true;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 76e8bce6f46e..ad572a0f2124 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
host->irq_mask &= ~irq;
else
host->irq_mask |= irq;
- spin_unlock_irqrestore(&host->lock, flags);
writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7fb0753abe30..6b814d7d6560 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1776,8 +1776,8 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
*/
if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
struct pinctrl *p = devm_pinctrl_get(host->dev);
- if (!p) {
- ret = -ENODEV;
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
goto err_free_irq;
}
if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index aea00ce708b6..81a781c1f9d6 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -39,6 +39,7 @@
#include <linux/msm-bus.h>
#include <linux/pm_runtime.h>
#include <trace/events/mmc.h>
+#include <soc/qcom/boot_stats.h>
#include "sdhci-msm.h"
#include "sdhci-msm-ice.h"
@@ -801,19 +802,23 @@ static int msm_init_cm_dll(struct sdhci_host *host)
| CORE_CK_OUT_EN), host->ioaddr +
msm_host_offset->CORE_DLL_CONFIG);
- wait_cnt = 50;
- /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
- while (!(readl_relaxed(host->ioaddr +
- msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
- /* max. wait for 50us sec for LOCK bit to be set */
- if (--wait_cnt == 0) {
- pr_err("%s: %s: DLL failed to LOCK\n",
- mmc_hostname(mmc), __func__);
- rc = -ETIMEDOUT;
- goto out;
+ /* For hs400es mode, no need to wait for core dll lock */
+ if (!(msm_host->enhanced_strobe &&
+ mmc_card_strobe(msm_host->mmc->card))) {
+ wait_cnt = 50;
+ /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
+ while (!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
+ /* max. wait for 50us sec for LOCK bit to be set */
+ if (--wait_cnt == 0) {
+ pr_err("%s: %s: DLL failed to LOCK\n",
+ mmc_hostname(mmc), __func__);
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+ /* wait for 1us before polling again */
+ udelay(1);
}
- /* wait for 1us before polling again */
- udelay(1);
}
out:
@@ -3166,7 +3171,10 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
| CORE_HC_SELECT_IN_EN), host->ioaddr +
msm_host_offset->CORE_VENDOR_SPEC);
}
- if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
+ /* No need to check for DLL lock for HS400es mode */
+ if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533 &&
+ !((card && mmc_card_strobe(card) &&
+ msm_host->enhanced_strobe))) {
/*
* Poll on DLL_LOCK and DDR_DLL_LOCK bits in
* CORE_DLL_STATUS to be set. This should get set
@@ -4250,6 +4258,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
void __iomem *tlmm_mem;
unsigned long flags;
bool force_probe;
+ char boot_marker[40];
pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
@@ -4274,6 +4283,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto out_host_free;
}
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER %s Init", mmc_hostname(host->mmc));
+ place_marker(boot_marker);
+
pltfm_host = sdhci_priv(host);
pltfm_host->priv = msm_host;
msm_host->mmc = host->mmc;
@@ -4747,6 +4760,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (sdhci_msm_is_bootdevice(&pdev->dev))
mmc_flush_detect_work(host->mmc);
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER %s Ready", mmc_hostname(host->mmc));
+ place_marker(boot_marker);
+
/* Successful initialization */
goto out;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 83b1226471c1..ac66c61d9433 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -418,6 +418,20 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
+ /*
+ * Limit SD clock to 167MHz for ls1046a according to its datasheet
+ */
+ if (clock > 167000000 &&
+ of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc"))
+ clock = 167000000;
+
+ /*
+ * Limit SD clock to 125MHz for ls1012a according to its datasheet
+ */
+ if (clock > 125000000 &&
+ of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc"))
+ clock = 125000000;
+
/* Workaround to reduce the clock frequency for p1010 esdhc */
if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
if (clock > 20000000)
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 8a25adced79f..bbfa1f129266 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -67,6 +67,10 @@ endchoice
config MTD_CFI_GEOMETRY
bool "Specific CFI Flash geometry selection"
depends on MTD_CFI_ADV_OPTIONS
+ select MTD_MAP_BANK_WIDTH_1 if !(MTD_MAP_BANK_WIDTH_2 || \
+ MTD_MAP_BANK_WIDTH_4 || MTD_MAP_BANK_WIDTH_8 || \
+ MTD_MAP_BANK_WIDTH_16 || MTD_MAP_BANK_WIDTH_32)
+ select MTD_CFI_I1 if !(MTD_CFI_I2 || MTD_CFI_I4 || MTD_CFI_I8)
help
This option does not affect the code directly, but will enable
some other configuration options which would allow you to reduce
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 286b97a304cf..4509ee0b294a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
+#define M28F00AP30 0x8963
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
extp->MinorVersion = '1';
}
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+ /*
+	 * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
+	 * w.r.t. Erase Suspend for their small erase blocks (0x8000).
+ */
+ if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+ return 1;
+ return 0;
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
@@ -825,21 +837,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
+		/* Do not allow suspend if read/write targets the EB address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+ /* do not suspend small EBs, buggy Micron Chips */
+ if (cfi_is_micron_28F00AP30(cfi, chip) &&
+ (chip->in_progress_block_mask == ~(0x8000-1)))
+ goto sleep;
/* Erase suspend */
- map_write(map, CMD(0xB0), adr);
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- status = map_read(map, adr);
+ status = map_read(map, chip->in_progress_block_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
@@ -1035,8 +1056,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), adr);
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -1927,6 +1948,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index c3624eb571d1..31448a2b39ae 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -814,9 +814,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
- /* We could check to see if we're trying to access the sector
- * that is currently being erased. However, no user will try
- * anything like that so we just wait for the timeout. */
+		/* Do not allow suspend if read/write targets the EB address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
/* Erase suspend */
/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2265,6 +2266,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(map->size - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
@@ -2354,6 +2356,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 7c0b27d132b1..b479bd81120b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
do {
uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
mask = (1 << (cfi->device_type * 8)) - 1;
+ if (ofs >= map->size)
+ return 0;
result = map_read(map, base + ofs);
bank++;
} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 0455166f05fa..4f206a99164c 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
}
-static int ck804xrom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 76ed651b515b..9646b0766ce0 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
pci_dev_put(window->pdev);
}
-static int esb2rom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init esb2rom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct esb2rom_window *window = &esb2rom_window;
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 8636bba42200..976d42f63aef 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -57,10 +57,12 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
{
struct ichxrom_map_info *map, *scratch;
u16 word;
+ int ret;
/* Disable writes through the rom window */
- pci_read_config_word(window->pdev, BIOS_CNTL, &word);
- pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+ ret = pci_read_config_word(window->pdev, BIOS_CNTL, &word);
+ if (!ret)
+ pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
pci_dev_put(window->pdev);
/* Free all of the mtd devices */
@@ -84,8 +86,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
}
-static int ichxrom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 4a07ba1195b5..d125d19a35e4 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1922,16 +1922,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
tmp &= ~ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
- if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
- /*
- * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
- * errors
- */
- if (has_flash_dma(ctrl))
- tmp &= ~ACC_CONTROL_PREFETCH;
- else
- tmp |= ACC_CONTROL_PREFETCH;
- }
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
nand_writereg(ctrl, offs, tmp);
return 0;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 7f4ac8c19001..5e3fa5861039 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -726,6 +726,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
u32 nand_fsr;
+ int status;
/* Use READ_STATUS command, but wait for the device to be ready */
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -740,12 +741,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
fsl_ifc_run_command(mtd);
nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
-
+ status = nand_fsr >> 24;
/*
* The chip always seems to report that it is
* write-protected, even when it is not.
*/
- return nand_fsr | NAND_STATUS_WP;
+ return status | NAND_STATUS_WP;
}
static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 2064adac1d17..40a335c6b792 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,24 +1029,97 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
- /* handle the block mark swapping */
- block_mark_swapping(this, payload_virt, auxiliary_virt);
-
/* Loop over status bytes, accumulating ECC status. */
status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ read_page_swap_end(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+
for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
continue;
if (*status == STATUS_UNCORRECTABLE) {
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *eccbuf = this->raw_buffer;
+ int offset, bitoffset;
+ int eccbytes;
+ int flips;
+
+ /* Read ECC bytes into our internal raw_buffer */
+ offset = nfc_geo->metadata_size * 8;
+ offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
+ offset -= eccbits;
+ bitoffset = offset % 8;
+ eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
+ offset /= 8;
+ eccbytes -= offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, eccbuf, eccbytes);
+
+ /*
+ * ECC data are not byte aligned and we may have
+ * in-band data in the first and last byte of
+ * eccbuf. Set non-eccbits to one so that
+ * nand_check_erased_ecc_chunk() does not count them
+ * as bitflips.
+ */
+ if (bitoffset)
+ eccbuf[0] |= GENMASK(bitoffset - 1, 0);
+
+ bitoffset = (bitoffset + eccbits) % 8;
+ if (bitoffset)
+ eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
+
+ /*
+			 * The ECC hardware reports an uncorrectable ECC status
+			 * code when there are bitflips in an erased page. As
+			 * nothing was written into this subpage, the ECC is
+			 * obviously wrong and we cannot trust it. We assume at
+			 * this point that we are reading an erased page and try
+			 * to correct the bitflips in the buffer, up to
+			 * ecc_strength bitflips. If this is a page with random
+			 * data, we exceed this number of bitflips and report an
+			 * ECC failure. Otherwise we use the corrected buffer.
+ */
+ if (i == 0) {
+ /* The first block includes metadata */
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ auxiliary_virt,
+ nfc_geo->metadata_size,
+ nfc_geo->ecc_strength);
+ } else {
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ NULL, 0,
+ nfc_geo->ecc_strength);
+ }
+
+ if (flips > 0) {
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ flips);
+ mtd->ecc_stats.corrected += flips;
+ continue;
+ }
+
mtd->ecc_stats.failed++;
continue;
}
+
mtd->ecc_stats.corrected += *status;
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
+ /* handle the block mark swapping */
+ block_mark_swapping(this, buf, auxiliary_virt);
+
if (oob_required) {
/*
* It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
@@ -1062,11 +1135,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
}
- read_page_swap_end(this, buf, nfc_geo->payload_size,
- this->payload_virt, this->payload_phys,
- nfc_geo->payload_size,
- payload_virt, payload_phys);
-
return max_bitflips;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 7ba109e8cf88..8406f346b0be 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -626,7 +626,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
- chip->cmd_ctrl(mtd, command, ctrl);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -655,6 +656,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@@ -717,7 +719,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -750,6 +754,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
@@ -2023,6 +2028,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
@@ -2083,6 +2089,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
readlen -= len;
if (!readlen)
break;
@@ -2108,7 +2116,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index bcba1a924c75..1f2785ee909f 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -160,7 +160,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
memset(&cfg, 0, sizeof(cfg));
cfg.direction = DMA_MEM_TO_DEV;
- cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+ cfg.dst_addr = flctl->fifo;
cfg.src_addr = 0;
ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
if (ret < 0)
@@ -176,7 +176,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
cfg.direction = DMA_DEV_TO_MEM;
cfg.dst_addr = 0;
- cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+ cfg.src_addr = flctl->fifo;
ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
if (ret < 0)
goto err;
@@ -1096,6 +1096,7 @@ static int flctl_probe(struct platform_device *pdev)
flctl->reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(flctl->reg))
return PTR_ERR(flctl->reg);
+ flctl->fifo = res->start + 0x24; /* FLDTFIFO */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 824711845c44..3bb9b34d9e77 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1046,8 +1046,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
- if (ecc->strength <= strengths[i])
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
break;
+ }
}
if (i >= ARRAY_SIZE(strengths)) {
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index ebf46ad2d513..07ad86759d92 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
+static DEFINE_IDR(ubiblock_minor_idr);
+/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
@@ -242,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
* in any case.
*/
if (mode & FMODE_WRITE) {
- ret = -EPERM;
+ ret = -EROFS;
goto out_unlock;
}
@@ -354,8 +356,6 @@ static struct blk_mq_ops ubiblock_mq_ops = {
.map_queue = blk_mq_map_queue,
};
-static DEFINE_IDR(ubiblock_minor_idr);
-
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -368,14 +368,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
- mutex_unlock(&devices_mutex);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
- mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
mutex_init(&dev->dev_mutex);
@@ -440,14 +441,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
goto out_free_queue;
}
- mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
- mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
return 0;
out_free_queue:
@@ -460,6 +460,8 @@ out_put_disk:
put_disk(dev->gd);
out_free_dev:
kfree(dev);
+out_unlock:
+ mutex_unlock(&devices_mutex);
return ret;
}
@@ -481,30 +483,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
+ int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
- mutex_unlock(&devices_mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
- mutex_unlock(&dev->dev_mutex);
- mutex_unlock(&devices_mutex);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
- mutex_unlock(&devices_mutex);
-
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+
kfree(dev);
return 0;
+
+out_unlock_dev:
+ mutex_unlock(&dev->dev_mutex);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+ return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -633,6 +641,7 @@ static void ubiblock_remove_all(void)
struct ubiblock *next;
struct ubiblock *dev;
+ mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
@@ -641,6 +650,7 @@ static void ubiblock_remove_all(void)
ubiblock_cleanup(dev);
kfree(dev);
}
+ mutex_unlock(&devices_mutex);
}
int __init ubiblock_init(void)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 9b7bc6326fa2..9556a4de159c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -951,6 +951,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /*
+ * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
+ * MLC NAND is different and needs special care, otherwise UBI or UBIFS
+ * will die soon and you will lose all your data.
+ */
+ if (mtd->type == MTD_MLCNANDFLASH) {
+ pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 30d3999dddba..ed62f1efe6eb 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -360,7 +360,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
{
int i;
- flush_work(&ubi->fm_work);
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 3ea4c022cbb9..ccdb3dd74421 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -265,6 +265,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -304,11 +310,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -328,6 +329,10 @@ out_sysfs:
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
if (do_free)
kfree(vol->eba_tbl);
out_acc:
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fe75c7d4372d..e9d5c193c773 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -411,6 +411,9 @@ config XEN_NETDEV_BACKEND
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
+ depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
+ IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \
+ PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES)
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index bb9e9fc45e1b..82d23bd3a742 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -453,7 +453,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave)
+ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
return;
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cb34b0f3856..339118f3c718 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1490,39 +1490,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- /* If the mode uses primary, then the following is handled by
- * bond_change_active_slave().
- */
- if (!bond_uses_primary(bond)) {
- /* set promiscuity level to new slave */
- if (bond_dev->flags & IFF_PROMISC) {
- res = dev_set_promiscuity(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- /* set allmulti level to new slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
- res = dev_set_allmulti(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- netif_addr_lock_bh(bond_dev);
-
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
-
- netif_addr_unlock_bh(bond_dev);
- }
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
- }
-
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1647,8 +1614,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond->dev->npinfo;
- if (slave_dev->npinfo) {
+ if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
@@ -1679,6 +1645,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_upper_unlink;
}
+ /* If the mode uses primary, then the following is handled by
+ * bond_change_active_slave().
+ */
+ if (!bond_uses_primary(bond)) {
+ /* set promiscuity level to new slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ res = dev_set_promiscuity(slave_dev, 1);
+ if (res)
+ goto err_sysfs_del;
+ }
+
+ /* set allmulti level to new slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ res = dev_set_allmulti(slave_dev, 1);
+ if (res) {
+ if (bond_dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+ goto err_sysfs_del;
+ }
+ }
+
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ /* add lacpdu mc addr to mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_add(slave_dev, lacpdu_multicast);
+ }
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
@@ -1702,6 +1702,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
+err_sysfs_del:
+ bond_sysfs_slave_del(new_slave);
+
err_upper_unlink:
bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -1709,9 +1712,6 @@ err_unregister:
netdev_rx_handler_unregister(slave_dev);
err_detach:
- if (!bond_uses_primary(bond))
- bond_hw_addr_flush(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (rcu_access_pointer(bond->primary_slave) == new_slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
@@ -2555,11 +2555,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);
+ slave->new_link = BOND_LINK_NOCHANGE;
+
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
- slave->link = BOND_LINK_UP;
+ slave->new_link = BOND_LINK_UP;
slave_state_changed = 1;
/* primary_slave has no meaning in round-robin
@@ -2586,7 +2588,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) {
- slave->link = BOND_LINK_DOWN;
+ slave->new_link = BOND_LINK_DOWN;
slave_state_changed = 1;
if (slave->link_failure_count < UINT_MAX)
@@ -2617,6 +2619,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!rtnl_trylock())
goto re_arm;
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->new_link != BOND_LINK_NOCHANGE)
+ slave->link = slave->new_link;
+ }
+
if (slave_state_changed) {
bond_slave_state_change(bond);
if (BOND_MODE(bond) == BOND_MODE_XOR)
@@ -3276,12 +3283,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
u64 nv = new[i];
u64 ov = old[i];
+ s64 delta = nv - ov;
/* detects if this particular field is 32bit only */
if (((nv | ov) >> 32) == 0)
- res[i] += (u32)nv - (u32)ov;
- else
- res[i] += nv - ov;
+ delta = (s64)(s32)((u32)nv - (u32)ov);
+
+ /* filter anomalies, some drivers reset their stats
+ * at down/up events.
+ */
+ if (delta > 0)
+ res[i] += delta;
}
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f3..6da69af103e6 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
return 0;
}
-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static void cc770_tx(struct net_device *dev, int mo)
{
struct cc770_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
u8 dlc, rtr;
u32 id;
int i;
- if (can_dropped_invalid_skb(dev, skb))
- return NETDEV_TX_OK;
-
- if ((cc770_read_reg(priv,
- msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
- netdev_err(dev, "TX register is still occupied!\n");
- return NETDEV_TX_BUSY;
- }
-
- netif_stop_queue(dev);
-
dlc = cf->can_dlc;
id = cf->can_id;
- if (cf->can_id & CAN_RTR_FLAG)
- rtr = 0;
- else
- rtr = MSGCFG_DIR;
+ rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
cc770_write_reg(priv, msgobj[mo].ctrl1,
RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+
if (id & CAN_EFF_FLAG) {
id &= CAN_EFF_MASK;
cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < dlc; i++)
cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
- /* Store echo skb before starting the transfer */
- can_put_echo_skb(skb, dev, 0);
-
cc770_write_reg(priv, msgobj[mo].ctrl1,
- RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
+}
- stats->tx_bytes += dlc;
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
- /*
- * HM: We had some cases of repeated IRQs so make sure the
- * INT is acknowledged I know it's already further up, but
- * doing again fixed the issue
- */
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ netif_stop_queue(dev);
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ priv->tx_skb = skb;
+ cc770_tx(dev, mo);
return NETDEV_TX_OK;
}
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
+ struct can_frame *cf;
+ u8 ctrl1;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
- /* Nothing more to send, switch off interrupts */
cc770_write_reg(priv, msgobj[mo].ctrl0,
MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
- /*
- * We had some cases of repeated IRQ so make sure the
- * INT is acknowledged
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
+
+ if (unlikely(!priv->tx_skb)) {
+ netdev_err(dev, "missing tx skb in tx interrupt\n");
+ return;
+ }
+
+ if (unlikely(ctrl1 & MSGLST_SET)) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ /* When the CC770 is sending an RTR message and it receives a regular
+ * message that matches the id of the RTR message, it will overwrite the
+ * outgoing message in the TX register. When this happens we must
+ * process the received message and try to transmit the outgoing skb
+ * again.
*/
- cc770_write_reg(priv, msgobj[mo].ctrl0,
- MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+ if (unlikely(ctrl1 & NEWDAT_SET)) {
+ cc770_rx(dev, mo, ctrl1);
+ cc770_tx(dev, mo);
+ return;
+ }
+ cf = (struct can_frame *)priv->tx_skb->data;
+ stats->tx_bytes += cf->can_dlc;
stats->tx_packets++;
+
+ can_put_echo_skb(priv->tx_skb, dev, 0);
can_get_echo_skb(dev, 0);
+ priv->tx_skb = NULL;
+
netif_wake_queue(dev);
}
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
priv->can.do_set_bittiming = cc770_set_bittiming;
priv->can.do_set_mode = cc770_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->tx_skb = NULL;
memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
u8 cpu_interface; /* CPU interface register */
u8 clkout; /* Clock out register */
	u8 bus_config;		/* Bus configuration register */
+
+ struct sk_buff *tx_skb;
};
struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 16f7cadda5c3..47f43bdecd51 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -493,7 +493,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
data = be32_to_cpup((__be32 *)&cf->data[0]);
flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]);
}
- if (cf->can_dlc > 3) {
+ if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]);
}
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
index 5b315573387e..e6b3862230b9 100644
--- a/drivers/net/can/spi/Kconfig
+++ b/drivers/net/can/spi/Kconfig
@@ -18,4 +18,10 @@ config CAN_K61
depends on SPI
---help---
Driver for the Freescale K61 SPI CAN controllers.
+
+config QTI_CAN
+ tristate "Unified driver for QTI CAN controllers"
+ depends on SPI
+ ---help---
+	  Unified driver for QTI CAN controllers attached over SPI.
+	  Each CAN channel is exposed as a separate CAN network device.
endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index 375a6cbfbb67..c1951ccc0034 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_RH850) += rh850.o
obj-${CONFIG_CAN_K61} += k61.o
+obj-$(CONFIG_QTI_CAN) += qti-can.o
diff --git a/drivers/net/can/spi/k61.c b/drivers/net/can/spi/k61.c
index 84c13a1c04a5..7830d5badb94 100644
--- a/drivers/net/can/spi/k61.c
+++ b/drivers/net/can/spi/k61.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -918,7 +918,6 @@ static int k61_remove(struct spi_device *spi)
static const struct of_device_id k61_match_table[] = {
{ .compatible = "fsl,k61" },
- { .compatible = "nxp,mpc5746c" },
{ }
};
diff --git a/drivers/net/can/spi/qti-can.c b/drivers/net/can/spi/qti-can.c
new file mode 100644
index 000000000000..7db6ecf8f354
--- /dev/null
+++ b/drivers/net/can/spi/qti-can.c
@@ -0,0 +1,1454 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/uaccess.h>
+#include <linux/pm.h>
+#include <asm/div64.h>
+
+#define DEBUG_QTI_CAN 0
+#if DEBUG_QTI_CAN == 1
+#define LOGDI(...) dev_info(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNI(...) netdev_info(netdev, __VA_ARGS__)
+#else
+#define LOGDI(...) dev_dbg(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNI(...) netdev_dbg(netdev, __VA_ARGS__)
+#endif
+#define LOGDE(...) dev_err(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNE(...) netdev_err(netdev, __VA_ARGS__)
+
+#define MAX_TX_BUFFERS 1
+#define XFER_BUFFER_SIZE 64
+#define RX_ASSEMBLY_BUFFER_SIZE 128
+#define QTI_CAN_FW_QUERY_RETRY_COUNT 3
+#define DRIVER_MODE_RAW_FRAMES 0
+#define DRIVER_MODE_PROPERTIES 1
+#define DRIVER_MODE_AMB 2
+#define QUERY_FIRMWARE_TIMEOUT_MS 100
+
+struct qti_can {
+ struct net_device **netdev;
+ struct spi_device *spidev;
+ struct mutex spi_lock; /* SPI device lock */
+ struct workqueue_struct *tx_wq;
+ char *tx_buf, *rx_buf;
+ int xfer_length;
+ atomic_t msg_seq;
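+	/* staging area for responses that span more than one SPI transfer */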
+ char *assembly_buffer;
+ u8 assembly_buffer_size;
+ atomic_t netif_queue_stop;
+ struct completion response_completion;
+ int wait_cmd;
+ int cmd_result;
+ int driver_mode;
+ int clk_freq_mhz;
+ int max_can_channels;
+ int bits_per_word;
+ int reset_delay_msec;
+ int reset;
+ bool support_can_fd;
+ bool can_fw_cmd_timeout_req;
+ u32 rem_all_buffering_timeout_ms;
+ u32 can_fw_cmd_timeout_ms;
+};
+
+struct qti_can_netdev_privdata {
+ struct can_priv can;
+ struct qti_can *qti_can;
+ u8 netdev_index;
+};
+
+struct qti_can_tx_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ struct net_device *netdev;
+};
+
+/* Message definitions */
+struct spi_mosi { /* TLV for MOSI line */
+ u8 cmd;
+ u8 len;
+ u16 seq;
+ u8 data[];
+} __packed;
+
+struct spi_miso { /* TLV for MISO line */
+ u8 cmd;
+ u8 len;
+	u16 seq; /* should match seq field from request, or 0 for unsolicited messages */
+ u8 data[];
+} __packed;
+
+#define CMD_GET_FW_VERSION 0x81
+#define CMD_CAN_SEND_FRAME 0x82
+#define CMD_CAN_ADD_FILTER 0x83
+#define CMD_CAN_REMOVE_FILTER 0x84
+#define CMD_CAN_RECEIVE_FRAME 0x85
+#define CMD_CAN_CONFIG_BIT_TIMING 0x86
+#define CMD_CAN_DATA_BUFF_ADD 0x87
+#define CMD_CAN_DATA_BUFF_REMOVE 0X88
+#define CMD_CAN_RELEASE_BUFFER 0x89
+#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A
+#define CMD_PROPERTY_WRITE 0x8B
+#define CMD_PROPERTY_READ 0x8C
+#define CMD_GET_FW_BR_VERSION 0x95
+#define CMD_BEGIN_FIRMWARE_UPGRADE 0x96
+#define CMD_FIRMWARE_UPGRADE_DATA 0x97
+#define CMD_END_FIRMWARE_UPGRADE 0x98
+#define CMD_BEGIN_BOOT_ROM_UPGRADE 0x99
+#define CMD_BOOT_ROM_UPGRADE_DATA 0x9A
+#define CMD_END_BOOT_ROM_UPGRADE 0x9B
+
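+/* Private ioctls, exposed to userspace via the SIOCDEVPRIVATE range */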
+#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0)
+#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1)
+#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2)
+#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3)
+#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5)
+#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6)
+#define IOCTL_GET_FW_BR_VERSION (SIOCDEVPRIVATE + 7)
+#define IOCTL_BEGIN_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 8)
+#define IOCTL_FIRMWARE_UPGRADE_DATA (SIOCDEVPRIVATE + 9)
+#define IOCTL_END_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 10)
+#define IOCTL_BEGIN_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 11)
+#define IOCTL_BOOT_ROM_UPGRADE_DATA (SIOCDEVPRIVATE + 12)
+#define IOCTL_END_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 13)
+
+#define IFR_DATA_OFFSET 0x100
+struct can_fw_resp {
+ u8 maj;
+ u8 min;
+ u8 ver[48];
+} __packed;
+
+struct can_write_req {
+ u8 can_if;
+ u32 mid;
+ u8 dlc;
+ u8 data[8];
+} __packed;
+
+struct can_write_resp {
+ u8 err;
+} __packed;
+
+struct can_filter_req {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct can_add_filter_resp {
+ u8 err;
+} __packed;
+
+struct can_receive_frame {
+ u8 can_if;
+ u32 ts;
+ u32 mid;
+ u8 dlc;
+ u8 data[8];
+} __packed;
+
+struct can_config_bit_timing {
+ u8 can_if;
+ u32 prop_seg;
+ u32 phase_seg1;
+ u32 phase_seg2;
+ u32 sjw;
+ u32 brp;
+} __packed;
+
+static struct can_bittiming_const rh850_bittiming_const = {
+ .name = "qti_can",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 70,
+ .brp_inc = 1,
+};
+
+static struct can_bittiming_const flexcan_bittiming_const = {
+ .name = "qti_can",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static struct can_bittiming_const qti_can_bittiming_const;
+
+static struct can_bittiming_const qti_can_data_bittiming_const = {
+ .name = "qti_can",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 70,
+ .brp_inc = 1,
+};
+
+struct vehicle_property {
+ int id;
+ u64 ts;
+ int zone;
+ int val_type;
+ u32 data_len;
+ union {
+ u8 bval;
+ int val;
+ int val_arr[4];
+ float f_value;
+ float float_arr[4];
+ u8 str[36];
+ };
+} __packed;
+
+struct qti_can_release_can_buffer {
+ u8 enable;
+} __packed;
+
+struct qti_can_buffer {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct can_fw_br_resp {
+ u8 maj;
+ u8 min;
+ u8 ver[32];
+ u8 br_maj;
+ u8 br_min;
+ u8 curr_exec_mode;
+} __packed;
+
+struct qti_can_ioctl_req {
+ u8 len;
+ u8 data[64];
+} __packed;
+
+static int qti_can_rx_message(struct qti_can *priv_data);
+
+static irqreturn_t qti_can_irq(int irq, void *priv)
+{
+ struct qti_can *priv_data = priv;
+
+ LOGDI("qti_can_irq\n");
+ qti_can_rx_message(priv_data);
+ return IRQ_HANDLED;
+}
+
+static void qti_can_receive_frame(struct qti_can *priv_data,
+ struct can_receive_frame *frame)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *skt;
+ ktime_t nsec;
+ struct net_device *netdev;
+ int i;
+ struct device *dev;
+
+ dev = &priv_data->spidev->dev;
+ if (frame->can_if >= priv_data->max_can_channels) {
+ LOGDE("qti_can rcv error. Channel is %d\n", frame->can_if);
+ return;
+ }
+
+ netdev = priv_data->netdev[frame->can_if];
+ skb = alloc_can_skb(netdev, &cf);
+ if (!skb) {
+ LOGDE("skb alloc failed. frame->can_if %d\n", frame->can_if);
+ return;
+ }
+
+ LOGDI("rcv frame %d %d %x %d %x %x %x %x %x %x %x %x\n",
+ frame->can_if, frame->ts, frame->mid, frame->dlc,
+ frame->data[0], frame->data[1], frame->data[2], frame->data[3],
+ frame->data[4], frame->data[5], frame->data[6], frame->data[7]);
+ cf->can_id = le32_to_cpu(frame->mid);
+ cf->can_dlc = get_can_dlc(frame->dlc);
+
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = frame->data[i];
+
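+	/*
+	 * The controller reports a millisecond timestamp for each frame;
+	 * expose it to userspace as the skb hardware timestamp.
+	 */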
+ nsec = ms_to_ktime(le32_to_cpu(frame->ts));
+ skt = skb_hwtstamps(skb);
+ skt->hwtstamp = nsec;
+ LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+ skb->tstamp = nsec;
+ netif_rx(skb);
+ netdev->stats.rx_packets++;
+}
+
+static void qti_can_receive_property(struct qti_can *priv_data,
+ struct vehicle_property *property)
+{
+ struct canfd_frame *cfd;
+ u8 *p;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *skt;
+ ktime_t nsec;
+ struct net_device *netdev;
+ struct device *dev;
+ int i;
+
+ /* can0 as the channel with properties */
+ dev = &priv_data->spidev->dev;
+ netdev = priv_data->netdev[0];
+ skb = alloc_canfd_skb(netdev, &cfd);
+ if (!skb) {
+ LOGDE("skb alloc failed. frame->can_if %d\n", 0);
+ return;
+ }
+
+ LOGDI("rcv property:0x%x data:%2x %2x %2x %2x", property->id,
+ property->str[0], property->str[1],
+ property->str[2], property->str[3]);
+ cfd->can_id = 0x00;
+ cfd->len = sizeof(struct vehicle_property);
+
+ p = (u8 *)property;
+ for (i = 0; i < cfd->len; i++)
+ cfd->data[i] = p[i];
+
+ nsec = ns_to_ktime(le64_to_cpu(property->ts));
+ skt = skb_hwtstamps(skb);
+ skt->hwtstamp = nsec;
+ LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+ skb->tstamp = nsec;
+ netif_rx(skb);
+ netdev->stats.rx_packets++;
+}
+
+static int qti_can_process_response(struct qti_can *priv_data,
+ struct spi_miso *resp, int length)
+{
+ int ret = 0;
+
+ LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq);
+ if (resp->cmd == CMD_CAN_RECEIVE_FRAME) {
+ struct can_receive_frame *frame =
+ (struct can_receive_frame *)&resp->data;
+ if (resp->len > length) {
+ LOGDE("Error. This should never happen\n");
+ LOGDE("process_response: Saving %d bytes\n", length);
+ memcpy(priv_data->assembly_buffer, (char *)resp,
+ length);
+ priv_data->assembly_buffer_size = length;
+ } else {
+ qti_can_receive_frame(priv_data, frame);
+ }
+ } else if (resp->cmd == CMD_PROPERTY_READ) {
+ struct vehicle_property *property =
+ (struct vehicle_property *)&resp->data;
+ if (resp->len > length) {
+ LOGDE("Error. This should never happen\n");
+ LOGDE("process_response: Saving %d bytes\n", length);
+ memcpy(priv_data->assembly_buffer, (char *)resp,
+ length);
+ priv_data->assembly_buffer_size = length;
+ } else {
+ qti_can_receive_property(priv_data, property);
+ }
+ } else if (resp->cmd == CMD_GET_FW_VERSION) {
+ struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data;
+
+ dev_info(&priv_data->spidev->dev, "fw %d.%d",
+ fw_resp->maj, fw_resp->min);
+ dev_info(&priv_data->spidev->dev, "fw string %s",
+ fw_resp->ver);
+ } else if (resp->cmd == CMD_GET_FW_BR_VERSION) {
+ struct can_fw_br_resp *fw_resp =
+ (struct can_fw_br_resp *)resp->data;
+ dev_info(&priv_data->spidev->dev, "fw_can %d.%d",
+ fw_resp->maj, fw_resp->min);
+ dev_info(&priv_data->spidev->dev, "fw string %s",
+ fw_resp->ver);
+ dev_info(&priv_data->spidev->dev, "fw_br %d.%d exec_mode %d",
+ fw_resp->br_maj, fw_resp->br_min,
+ fw_resp->curr_exec_mode);
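+		/*
+		 * Pack the version info into the return value: bits 31-28
+		 * exec mode, 27-24/23-16 boot ROM major/minor, 15-8/7-0
+		 * firmware major/minor.
+		 */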
+ ret = fw_resp->curr_exec_mode << 28;
+ ret |= (fw_resp->br_maj & 0xF) << 24;
+ ret |= (fw_resp->br_min & 0xFF) << 16;
+ ret |= (fw_resp->maj & 0xF) << 8;
+ ret |= (fw_resp->min & 0xFF);
+ }
+
+ if (resp->cmd == priv_data->wait_cmd) {
+ priv_data->cmd_result = ret;
+ complete(&priv_data->response_completion);
+ }
+ return ret;
+}
+
+static int qti_can_process_rx(struct qti_can *priv_data, char *rx_buf)
+{
+ struct spi_miso *resp;
+ struct device *dev;
+ int length_processed = 0, actual_length = priv_data->xfer_length;
+ int ret = 0;
+
+ dev = &priv_data->spidev->dev;
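+	/*
+	 * One SPI transfer may carry several TLV responses, and a response
+	 * may be split across two transfers; partial responses are kept in
+	 * assembly_buffer and completed on the next pass.
+	 */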
+ while (length_processed < actual_length) {
+ int length_left = actual_length - length_processed;
+ int length = 0; /* length of consumed chunk */
+ void *data;
+
+ if (priv_data->assembly_buffer_size > 0) {
+ LOGDI("callback: Reassembling %d bytes\n",
+ priv_data->assembly_buffer_size);
+			/* should copy just 1 byte instead, since cmd has */
+			/* already been copied as the first byte */
+ memcpy(priv_data->assembly_buffer +
+ priv_data->assembly_buffer_size,
+ rx_buf, 2);
+ data = priv_data->assembly_buffer;
+ resp = (struct spi_miso *)data;
+ length = resp->len + sizeof(*resp)
+ - priv_data->assembly_buffer_size;
+ if (length > 0)
+ memcpy(priv_data->assembly_buffer +
+ priv_data->assembly_buffer_size,
+ rx_buf, length);
+ length_left += priv_data->assembly_buffer_size;
+ priv_data->assembly_buffer_size = 0;
+ } else {
+ data = rx_buf + length_processed;
+ resp = (struct spi_miso *)data;
+ if (resp->cmd == 0) {
+ /* special case. ignore cmd==0 */
+ length_processed += 1;
+ continue;
+ }
+ length = resp->len + sizeof(struct spi_miso);
+ }
+ LOGDI("processing. p %d -> l %d (t %d)\n",
+ length_processed, length_left, priv_data->xfer_length);
+ length_processed += length;
+ if (length_left >= sizeof(*resp) &&
+ resp->len + sizeof(*resp) <= length_left) {
+ struct spi_miso *resp =
+ (struct spi_miso *)data;
+ ret = qti_can_process_response(priv_data, resp,
+ length_left);
+ } else if (length_left > 0) {
+ /* Not full message. Store however much we have for */
+ /* later assembly */
+ LOGDI("callback: Storing %d bytes of response\n",
+ length_left);
+ memcpy(priv_data->assembly_buffer, data, length_left);
+ priv_data->assembly_buffer_size = length_left;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int qti_can_do_spi_transaction(struct qti_can *priv_data)
+{
+ struct spi_device *spi;
+ struct spi_transfer *xfer;
+ struct spi_message *msg;
+ struct device *dev;
+ int ret;
+
+ spi = priv_data->spidev;
+ dev = &spi->dev;
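+	/*
+	 * Every command goes out as a fixed-size full-duplex transfer; the
+	 * data clocked in on MISO is parsed by qti_can_process_rx() once
+	 * spi_sync() returns successfully.
+	 */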
+ msg = devm_kzalloc(&spi->dev, sizeof(*msg), GFP_KERNEL);
+ xfer = devm_kzalloc(&spi->dev, sizeof(*xfer), GFP_KERNEL);
+ if (!xfer || !msg)
+ return -ENOMEM;
+ LOGDI(">%x %2d [%d]\n", priv_data->tx_buf[0],
+ priv_data->tx_buf[1], priv_data->tx_buf[2]);
+ spi_message_init(msg);
+ spi_message_add_tail(xfer, msg);
+ xfer->tx_buf = priv_data->tx_buf;
+ xfer->rx_buf = priv_data->rx_buf;
+ xfer->len = priv_data->xfer_length;
+ xfer->bits_per_word = priv_data->bits_per_word;
+ ret = spi_sync(spi, msg);
+ LOGDI("spi_sync ret %d data %x %x %x %x %x %x %x %x\n", ret,
+ priv_data->rx_buf[0], priv_data->rx_buf[1],
+ priv_data->rx_buf[2], priv_data->rx_buf[3],
+ priv_data->rx_buf[4], priv_data->rx_buf[5],
+ priv_data->rx_buf[6], priv_data->rx_buf[7]);
+
+ if (ret == 0)
+ qti_can_process_rx(priv_data, priv_data->rx_buf);
+ devm_kfree(&spi->dev, msg);
+ devm_kfree(&spi->dev, xfer);
+ return ret;
+}
+
+static int qti_can_rx_message(struct qti_can *priv_data)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int qti_can_query_firmware_version(struct qti_can *priv_data)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_GET_FW_VERSION;
+ req->len = 0;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
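+	/*
+	 * The reply comes back asynchronously on the RX path; record the
+	 * command being waited for so qti_can_process_response() can signal
+	 * the completion with the decoded result.
+	 */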
+ priv_data->wait_cmd = CMD_GET_FW_VERSION;
+ priv_data->cmd_result = -1;
+ reinit_completion(&priv_data->response_completion);
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ if (ret == 0) {
+ wait_for_completion_interruptible_timeout(
+ &priv_data->response_completion,
+ msecs_to_jiffies(QUERY_FIRMWARE_TIMEOUT_MS));
+ ret = priv_data->cmd_result;
+ }
+
+ return ret;
+}
+
+static int qti_can_set_bitrate(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct can_config_bit_timing *req_d;
+ struct qti_can *priv_data;
+ struct can_priv *priv = netdev_priv(netdev);
+ struct qti_can_netdev_privdata *qti_can_priv;
+
+ qti_can_priv = netdev_priv(netdev);
+ priv_data = qti_can_priv->qti_can;
+
+ netdev_info(netdev, "ch%i, bitrate setting>%i",
+ qti_can_priv->netdev_index, priv->bittiming.bitrate);
+ LOGNI("sjw>%i brp>%i ph_sg1>%i ph_sg2>%i smpl_pt>%i tq>%i pr_seg>%i",
+ priv->bittiming.sjw, priv->bittiming.brp,
+ priv->bittiming.phase_seg1,
+ priv->bittiming.phase_seg2,
+ priv->bittiming.sample_point,
+ priv->bittiming.tq, priv->bittiming.prop_seg);
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_CONFIG_BIT_TIMING;
+ req->len = sizeof(struct can_config_bit_timing);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ req_d = (struct can_config_bit_timing *)req->data;
+ req_d->can_if = qti_can_priv->netdev_index;
+ req_d->prop_seg = priv->bittiming.prop_seg;
+ req_d->phase_seg1 = priv->bittiming.phase_seg1;
+ req_d->phase_seg2 = priv->bittiming.phase_seg2;
+ req_d->sjw = priv->bittiming.sjw;
+ req_d->brp = priv->bittiming.brp;
+ ret = qti_can_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int qti_can_write(struct qti_can *priv_data,
+ int can_channel, struct canfd_frame *cf)
+{
+ char *tx_buf, *rx_buf;
+ int ret, i;
+ struct spi_mosi *req;
+ struct can_write_req *req_d;
+ struct net_device *netdev;
+
+ if (can_channel < 0 || can_channel >= priv_data->max_can_channels) {
+ LOGDE("qti_can_write error. Channel is %d\n", can_channel);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ if (priv_data->driver_mode == DRIVER_MODE_RAW_FRAMES) {
+ req->cmd = CMD_CAN_SEND_FRAME;
+ req->len = sizeof(struct can_write_req) + 8;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ req_d = (struct can_write_req *)req->data;
+ req_d->can_if = can_channel;
+ req_d->mid = cf->can_id;
+ req_d->dlc = cf->len;
+
+ for (i = 0; i < cf->len; i++)
+ req_d->data[i] = cf->data[i];
+ } else if (priv_data->driver_mode == DRIVER_MODE_PROPERTIES ||
+ priv_data->driver_mode == DRIVER_MODE_AMB) {
+ req->cmd = CMD_PROPERTY_WRITE;
+ req->len = sizeof(struct vehicle_property);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ for (i = 0; i < cf->len; i++)
+ req->data[i] = cf->data[i];
+ } else {
+ LOGDE("qti_can_write: wrong driver mode %i",
+ priv_data->driver_mode);
+ }
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ netdev = priv_data->netdev[can_channel];
+ netdev->stats.tx_packets++;
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int qti_can_netdev_open(struct net_device *netdev)
+{
+ int err;
+
+ LOGNI("Open");
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static int qti_can_netdev_close(struct net_device *netdev)
+{
+ LOGNI("Close");
+
+ netif_stop_queue(netdev);
+ close_candev(netdev);
+ return 0;
+}
+
+static void qti_can_send_can_frame(struct work_struct *ws)
+{
+ struct qti_can_tx_work *tx_work;
+ struct canfd_frame *cf;
+ struct qti_can *priv_data;
+ struct net_device *netdev;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+ int can_channel;
+
+ tx_work = container_of(ws, struct qti_can_tx_work, work);
+ netdev = tx_work->netdev;
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->qti_can;
+ can_channel = netdev_priv_data->netdev_index;
+
+ LOGDI("send_can_frame ws %pK\n", ws);
+ LOGDI("send_can_frame tx %pK\n", tx_work);
+
+ cf = (struct canfd_frame *)tx_work->skb->data;
+ qti_can_write(priv_data, can_channel, cf);
+
+ dev_kfree_skb(tx_work->skb);
+ kfree(tx_work);
+}
+
+static netdev_tx_t qti_can_netdev_start_xmit(
+ struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qti_can_netdev_privdata *netdev_priv_data = netdev_priv(netdev);
+ struct qti_can *priv_data = netdev_priv_data->qti_can;
+ struct qti_can_tx_work *tx_work;
+
+ LOGNI("netdev_start_xmit");
+ if (can_dropped_invalid_skb(netdev, skb)) {
+ LOGNE("Dropping invalid can frame\n");
+ return NETDEV_TX_OK;
+ }
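+	/* spi_sync() can sleep, so hand the frame off to the tx workqueue */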
+ tx_work = kzalloc(sizeof(*tx_work), GFP_ATOMIC);
+ if (!tx_work)
+ return NETDEV_TX_OK;
+ INIT_WORK(&tx_work->work, qti_can_send_can_frame);
+ tx_work->netdev = netdev;
+ tx_work->skb = skb;
+ queue_work(priv_data->tx_wq, &tx_work->work);
+
+ return NETDEV_TX_OK;
+}
+
+static int qti_can_send_release_can_buffer_cmd(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct qti_can *priv_data;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+ int *mode;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->qti_can;
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_RELEASE_BUFFER;
+ req->len = sizeof(int);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ mode = (int *)req->data;
+ *mode = priv_data->driver_mode;
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+ return ret;
+}
+
+static int qti_can_data_buffering(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ u32 timeout;
+ struct spi_mosi *req;
+ struct qti_can_buffer *enable_buffering;
+ struct qti_can_buffer *add_request;
+ struct qti_can *priv_data;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+ struct spi_device *spi;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->qti_can;
+ spi = priv_data->spidev;
+ timeout = priv_data->can_fw_cmd_timeout_ms;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+	if (!ifr) {
+		mutex_unlock(&priv_data->spi_lock);
+		return -EINVAL;
+	}
+	add_request = devm_kzalloc(&spi->dev,
+				   sizeof(struct qti_can_buffer),
+				   GFP_KERNEL);
+	if (!add_request) {
+		mutex_unlock(&priv_data->spi_lock);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(add_request, ifr->ifr_data,
+			   sizeof(struct qti_can_buffer))) {
+		devm_kfree(&spi->dev, add_request);
+		mutex_unlock(&priv_data->spi_lock);
+		return -EFAULT;
+	}
+
+ req = (struct spi_mosi *)tx_buf;
+ if (cmd == IOCTL_ENABLE_BUFFERING)
+ req->cmd = CMD_CAN_DATA_BUFF_ADD;
+ else
+ req->cmd = CMD_CAN_DATA_BUFF_REMOVE;
+ req->len = sizeof(struct qti_can_buffer);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ enable_buffering = (struct qti_can_buffer *)req->data;
+ enable_buffering->can_if = add_request->can_if;
+ enable_buffering->mid = add_request->mid;
+ enable_buffering->mask = add_request->mask;
+
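+	/*
+	 * Optionally wait for the firmware to acknowledge the command; the
+	 * timeout comes from the qcom,can-fw-cmd-timeout-ms DT property.
+	 */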
+ if (priv_data->can_fw_cmd_timeout_req) {
+ priv_data->wait_cmd = req->cmd;
+ priv_data->cmd_result = -1;
+ reinit_completion(&priv_data->response_completion);
+ }
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ devm_kfree(&spi->dev, add_request);
+ mutex_unlock(&priv_data->spi_lock);
+
+ if (ret == 0 && priv_data->can_fw_cmd_timeout_req) {
+ LOGDI("qti_can_data_buffering ready to wait for response\n");
+ ret = wait_for_completion_interruptible_timeout(
+ &priv_data->response_completion,
+ msecs_to_jiffies(timeout));
+ ret = priv_data->cmd_result;
+ }
+ return ret;
+}
+
+static int qti_can_remove_all_buffering(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ u32 timeout;
+ struct spi_mosi *req;
+ struct qti_can *priv_data;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->qti_can;
+ timeout = priv_data->rem_all_buffering_timeout_ms;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL;
+ req->len = 0;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ if (priv_data->can_fw_cmd_timeout_req) {
+ priv_data->wait_cmd = req->cmd;
+ priv_data->cmd_result = -1;
+ reinit_completion(&priv_data->response_completion);
+ }
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ if (ret == 0 && priv_data->can_fw_cmd_timeout_req) {
+ LOGDI("qti_can_remove_all_buffering wait for response\n");
+ ret = wait_for_completion_interruptible_timeout(
+ &priv_data->response_completion,
+ msecs_to_jiffies(timeout));
+ ret = priv_data->cmd_result;
+ }
+
+ return ret;
+}
+
+static int qti_can_frame_filter(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct can_filter_req *add_filter;
+ struct can_filter_req *filter_request;
+ struct qti_can *priv_data;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+ struct spi_device *spi;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->qti_can;
+ spi = priv_data->spidev;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	if (!ifr) {
+		mutex_unlock(&priv_data->spi_lock);
+		return -EINVAL;
+	}
+
+	filter_request =
+		devm_kzalloc(&spi->dev, sizeof(struct can_filter_req),
+			     GFP_KERNEL);
+	if (!filter_request) {
+		mutex_unlock(&priv_data->spi_lock);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(filter_request, ifr->ifr_data,
+			   sizeof(struct can_filter_req))) {
+		devm_kfree(&spi->dev, filter_request);
+		mutex_unlock(&priv_data->spi_lock);
+		return -EFAULT;
+	}
+
+ req = (struct spi_mosi *)tx_buf;
+ if (cmd == IOCTL_ADD_FRAME_FILTER)
+ req->cmd = CMD_CAN_ADD_FILTER;
+ else
+ req->cmd = CMD_CAN_REMOVE_FILTER;
+
+ req->len = sizeof(struct can_filter_req);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ add_filter = (struct can_filter_req *)req->data;
+ add_filter->can_if = filter_request->can_if;
+ add_filter->mid = filter_request->mid;
+ add_filter->mask = filter_request->mask;
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ devm_kfree(&spi->dev, filter_request);
+ mutex_unlock(&priv_data->spi_lock);
+ return ret;
+}
+
+static int qti_can_send_spi_locked(struct qti_can *priv_data, int cmd, int len,
+ u8 *data)
+{
+ char *tx_buf, *rx_buf;
+ struct spi_mosi *req;
+ int ret;
+
+ LOGDI("qti_can_send_spi_locked\n");
+
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = cmd;
+ req->len = len;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ if (unlikely(len > 64))
+ return -EINVAL;
+ memcpy(req->data, data, len);
+
+ ret = qti_can_do_spi_transaction(priv_data);
+ return ret;
+}
+
+static int qti_can_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd)
+{
+ switch (ioctl_cmd) {
+ case IOCTL_GET_FW_BR_VERSION:
+ return CMD_GET_FW_BR_VERSION;
+ case IOCTL_BEGIN_FIRMWARE_UPGRADE:
+ return CMD_BEGIN_FIRMWARE_UPGRADE;
+ case IOCTL_FIRMWARE_UPGRADE_DATA:
+ return CMD_FIRMWARE_UPGRADE_DATA;
+ case IOCTL_END_FIRMWARE_UPGRADE:
+ return CMD_END_FIRMWARE_UPGRADE;
+ case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
+ return CMD_BEGIN_BOOT_ROM_UPGRADE;
+ case IOCTL_BOOT_ROM_UPGRADE_DATA:
+ return CMD_BOOT_ROM_UPGRADE_DATA;
+ case IOCTL_END_BOOT_ROM_UPGRADE:
+ return CMD_END_BOOT_ROM_UPGRADE;
+ }
+ return -EINVAL;
+}
+
+static int qti_can_do_blocking_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ int spi_cmd, ret;
+
+ struct qti_can *priv_data;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+ struct qti_can_ioctl_req *ioctl_data = NULL;
+ struct spi_device *spi;
+ int len = 0;
+ u8 *data = NULL;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->qti_can;
+ spi = priv_data->spidev;
+
+ spi_cmd = qti_can_convert_ioctl_cmd_to_spi_cmd(cmd);
+ LOGDI("qti_can_do_blocking_ioctl spi_cmd %x\n", spi_cmd);
+ if (spi_cmd < 0) {
+ LOGDE("qti_can_do_blocking_ioctl wrong command %d\n", cmd);
+ return spi_cmd;
+ }
+
+ if (!ifr)
+ return -EINVAL;
+
+ mutex_lock(&priv_data->spi_lock);
+ if (spi_cmd == CMD_FIRMWARE_UPGRADE_DATA ||
+ spi_cmd == CMD_BOOT_ROM_UPGRADE_DATA) {
+ ioctl_data =
+ devm_kzalloc(&spi->dev,
+ sizeof(struct qti_can_ioctl_req),
+ GFP_KERNEL);
+		if (!ioctl_data) {
+			mutex_unlock(&priv_data->spi_lock);
+			return -ENOMEM;
+		}
+
+		if (copy_from_user(ioctl_data, ifr->ifr_data,
+				   sizeof(struct qti_can_ioctl_req))) {
+			devm_kfree(&spi->dev, ioctl_data);
+			mutex_unlock(&priv_data->spi_lock);
+			return -EFAULT;
+		}
+
+ /* Regular NULL check will fail here as ioctl_data is at
+ * some offset
+ */
+ if ((void *)ioctl_data > (void *)0x100) {
+ len = ioctl_data->len;
+ data = ioctl_data->data;
+ }
+ }
+ LOGDI("qti_can_do_blocking_ioctl len %d\n", len);
+
+ priv_data->wait_cmd = spi_cmd;
+ priv_data->cmd_result = -1;
+ reinit_completion(&priv_data->response_completion);
+
+ ret = qti_can_send_spi_locked(priv_data, spi_cmd, len, data);
+ if (ioctl_data)
+ devm_kfree(&spi->dev, ioctl_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ if (ret == 0) {
+ LOGDI("qti_can_do_blocking_ioctl ready to wait for response\n");
+ wait_for_completion_interruptible_timeout(
+ &priv_data->response_completion,
+ 5 * HZ);
+ ret = priv_data->cmd_result;
+ }
+ return ret;
+}
+
+static int qti_can_netdev_do_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct qti_can *priv_data;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+ int *mode;
+ int ret = -EINVAL;
+ struct spi_device *spi;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->qti_can;
+ spi = priv_data->spidev;
+ LOGDI("qti_can_netdev_do_ioctl %x\n", cmd);
+
+ switch (cmd) {
+ case IOCTL_RELEASE_CAN_BUFFER:
+ if (!ifr)
+ return -EINVAL;
+
+ /* Regular NULL check will fail here as ioctl_data is at
+ * some offset
+ */
+ if (ifr->ifr_data > (void __user *)IFR_DATA_OFFSET) {
+ mutex_lock(&priv_data->spi_lock);
+ mode = devm_kzalloc(&spi->dev, sizeof(int), GFP_KERNEL);
+			if (!mode) {
+				mutex_unlock(&priv_data->spi_lock);
+				return -ENOMEM;
+			}
+			if (copy_from_user(mode, ifr->ifr_data, sizeof(int))) {
+				devm_kfree(&spi->dev, mode);
+				mutex_unlock(&priv_data->spi_lock);
+				return -EFAULT;
+			}
+ priv_data->driver_mode = *mode;
+ LOGDE("qti_can_driver_mode %d\n",
+ priv_data->driver_mode);
+ devm_kfree(&spi->dev, mode);
+ mutex_unlock(&priv_data->spi_lock);
+ }
+ qti_can_send_release_can_buffer_cmd(netdev);
+ ret = 0;
+ break;
+ case IOCTL_ENABLE_BUFFERING:
+ case IOCTL_DISABLE_BUFFERING:
+ qti_can_data_buffering(netdev, ifr, cmd);
+ ret = 0;
+ break;
+ case IOCTL_DISABLE_ALL_BUFFERING:
+ qti_can_remove_all_buffering(netdev);
+ ret = 0;
+ break;
+ case IOCTL_ADD_FRAME_FILTER:
+ case IOCTL_REMOVE_FRAME_FILTER:
+ qti_can_frame_filter(netdev, ifr, cmd);
+ ret = 0;
+ break;
+ case IOCTL_GET_FW_BR_VERSION:
+ case IOCTL_BEGIN_FIRMWARE_UPGRADE:
+ case IOCTL_FIRMWARE_UPGRADE_DATA:
+ case IOCTL_END_FIRMWARE_UPGRADE:
+ case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
+ case IOCTL_BOOT_ROM_UPGRADE_DATA:
+ case IOCTL_END_BOOT_ROM_UPGRADE:
+ ret = qti_can_do_blocking_ioctl(netdev, ifr, cmd);
+ break;
+ }
+ LOGDI("qti_can_netdev_do_ioctl ret %d\n", ret);
+
+ return ret;
+}
+
+static const struct net_device_ops qti_can_netdev_ops = {
+ .ndo_open = qti_can_netdev_open,
+ .ndo_stop = qti_can_netdev_close,
+ .ndo_start_xmit = qti_can_netdev_start_xmit,
+ .ndo_do_ioctl = qti_can_netdev_do_ioctl,
+};
+
+static int qti_can_create_netdev(struct spi_device *spi,
+ struct qti_can *priv_data, int index)
+{
+ struct net_device *netdev;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+
+ LOGDI("qti_can_create_netdev %d\n", index);
+ if (index < 0 || index >= priv_data->max_can_channels) {
+ LOGDE("qti_can_create_netdev wrong index %d\n", index);
+ return -EINVAL;
+ }
+ netdev = alloc_candev(sizeof(*netdev_priv_data), MAX_TX_BUFFERS);
+ if (!netdev) {
+ LOGDE("Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ netdev->mtu = CANFD_MTU;
+
+ netdev_priv_data = netdev_priv(netdev);
+ netdev_priv_data->qti_can = priv_data;
+ netdev_priv_data->netdev_index = index;
+
+ priv_data->netdev[index] = netdev;
+
+ netdev->netdev_ops = &qti_can_netdev_ops;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
+ if (priv_data->support_can_fd)
+ netdev_priv_data->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ netdev_priv_data->can.bittiming_const = &qti_can_bittiming_const;
+ netdev_priv_data->can.data_bittiming_const =
+ &qti_can_data_bittiming_const;
+ netdev_priv_data->can.clock.freq = priv_data->clk_freq_mhz;
+ netdev_priv_data->can.do_set_bittiming = qti_can_set_bitrate;
+
+ return 0;
+}
+
+static struct qti_can *qti_can_create_priv_data(struct spi_device *spi)
+{
+ struct qti_can *priv_data;
+ int err;
+ struct device *dev;
+
+ dev = &spi->dev;
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data) {
+ err = -ENOMEM;
+ return NULL;
+ }
+ spi_set_drvdata(spi, priv_data);
+ atomic_set(&priv_data->netif_queue_stop, 0);
+ priv_data->spidev = spi;
+ priv_data->assembly_buffer = kzalloc(RX_ASSEMBLY_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!priv_data->assembly_buffer) {
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+
+ priv_data->tx_wq = alloc_workqueue("qti_can_tx_wq", 0, 0);
+ if (!priv_data->tx_wq) {
+ LOGDE("Couldn't alloc workqueue\n");
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+
+ priv_data->tx_buf = kzalloc(XFER_BUFFER_SIZE,
+ GFP_KERNEL);
+ priv_data->rx_buf = kzalloc(XFER_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!priv_data->tx_buf || !priv_data->rx_buf) {
+ LOGDE("Couldn't alloc tx or rx buffers\n");
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+ priv_data->xfer_length = 0;
+ priv_data->driver_mode = DRIVER_MODE_RAW_FRAMES;
+
+ mutex_init(&priv_data->spi_lock);
+ atomic_set(&priv_data->msg_seq, 0);
+ init_completion(&priv_data->response_completion);
+ return priv_data;
+
+cleanup_privdata:
+ if (priv_data) {
+ if (priv_data->tx_wq)
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data);
+ }
+ return NULL;
+}
+
+static const struct of_device_id qti_can_match_table[] = {
+ { .compatible = "qcom,nxp,mpc5746c" },
+ { }
+};
+
+static int qti_can_probe(struct spi_device *spi)
+{
+ int err, retry = 0, query_err = -1, i;
+ struct qti_can *priv_data = NULL;
+ struct device *dev;
+
+ dev = &spi->dev;
+ dev_info(dev, "qti_can_probe");
+
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(dev, "spi_setup failed: %d", err);
+ return err;
+ }
+
+ priv_data = qti_can_create_priv_data(spi);
+ if (!priv_data) {
+ dev_err(dev, "Failed to create qti_can priv_data\n");
+ err = -ENOMEM;
+ return err;
+ }
+ dev_info(dev, "qti_can_probe created priv_data");
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,clk-freq-mhz",
+ &priv_data->clk_freq_mhz);
+ if (err) {
+		LOGDE("DT property: qcom,clk-freq-mhz not defined\n");
+ return err;
+ }
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,max-can-channels",
+ &priv_data->max_can_channels);
+ if (err) {
+ LOGDE("DT property: qcom,max-can-channels not defined\n");
+ return err;
+ }
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,bits-per-word",
+ &priv_data->bits_per_word);
+ if (err)
+ priv_data->bits_per_word = 16;
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,reset-delay-msec",
+ &priv_data->reset_delay_msec);
+ if (err)
+ priv_data->reset_delay_msec = 1;
+
+ priv_data->can_fw_cmd_timeout_req =
+ of_property_read_bool(spi->dev.of_node,
+ "qcom,can-fw-cmd-timeout-req");
+
+ err = of_property_read_u32(spi->dev.of_node,
+ "qcom,can-fw-cmd-timeout-ms",
+ &priv_data->can_fw_cmd_timeout_ms);
+ if (err)
+ priv_data->can_fw_cmd_timeout_ms = 0;
+
+ err = of_property_read_u32(spi->dev.of_node,
+ "qcom,rem-all-buffering-timeout-ms",
+ &priv_data->rem_all_buffering_timeout_ms);
+ if (err)
+ priv_data->rem_all_buffering_timeout_ms = 0;
+
+ priv_data->reset = of_get_named_gpio(spi->dev.of_node,
+ "qcom,reset-gpio", 0);
+
+ if (gpio_is_valid(priv_data->reset)) {
+ err = gpio_request(priv_data->reset, "qti-can-reset");
+ if (err < 0) {
+ LOGDE("failed to request gpio %d: %d\n",
+ priv_data->reset, err);
+ return err;
+ }
+
+ gpio_direction_output(priv_data->reset, 0);
+ udelay(1);
+ gpio_direction_output(priv_data->reset, 1);
+ msleep(priv_data->reset_delay_msec);
+ }
+
+ priv_data->support_can_fd = of_property_read_bool(spi->dev.of_node,
+ "support-can-fd");
+
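+	/* Pick bit-timing limits matching the external CAN controller type */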
+ if (of_device_is_compatible(spi->dev.of_node, "qcom,nxp,mpc5746c"))
+ qti_can_bittiming_const = flexcan_bittiming_const;
+ else if (of_device_is_compatible(spi->dev.of_node,
+ "qcom,renesas,rh850"))
+ qti_can_bittiming_const = rh850_bittiming_const;
+
+ priv_data->netdev = kzalloc(sizeof(priv_data->netdev[0]) *
+ priv_data->max_can_channels,
+ GFP_KERNEL);
+ if (!priv_data->netdev) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ for (i = 0; i < priv_data->max_can_channels; i++) {
+ err = qti_can_create_netdev(spi, priv_data, i);
+ if (err) {
+ LOGDE("Failed to create CAN device: %d", err);
+ goto cleanup_candev;
+ }
+
+ err = register_candev(priv_data->netdev[i]);
+ if (err) {
+ LOGDE("Failed to register CAN device: %d", err);
+ goto unregister_candev;
+ }
+ }
+
+ err = request_threaded_irq(spi->irq, NULL, qti_can_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "qti-can", priv_data);
+ if (err) {
+ LOGDE("Failed to request irq: %d", err);
+ goto unregister_candev;
+ }
+ dev_info(dev, "Request irq %d ret %d\n", spi->irq, err);
+
+ while ((query_err != 0) && (retry < QTI_CAN_FW_QUERY_RETRY_COUNT)) {
+ query_err = qti_can_query_firmware_version(priv_data);
+ priv_data->assembly_buffer_size = 0;
+ retry++;
+ }
+
+ if (query_err) {
+ LOGDE("QTI CAN probe failed\n");
+ err = -ENODEV;
+ goto free_irq;
+ }
+ return 0;
+
+free_irq:
+ free_irq(spi->irq, priv_data);
+unregister_candev:
+ for (i = 0; i < priv_data->max_can_channels; i++)
+ unregister_candev(priv_data->netdev[i]);
+cleanup_candev:
+ if (priv_data) {
+ for (i = 0; i < priv_data->max_can_channels; i++) {
+ if (priv_data->netdev[i])
+ free_candev(priv_data->netdev[i]);
+ }
+ if (priv_data->tx_wq)
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data->netdev);
+ kfree(priv_data);
+ }
+ return err;
+}
+
+static int qti_can_remove(struct spi_device *spi)
+{
+ struct qti_can *priv_data = spi_get_drvdata(spi);
+ int i;
+
+ LOGDI("qti_can_remove\n");
+ for (i = 0; i < priv_data->max_can_channels; i++) {
+ unregister_candev(priv_data->netdev[i]);
+ free_candev(priv_data->netdev[i]);
+ }
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data->netdev);
+ kfree(priv_data);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int qti_can_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ enable_irq_wake(spi->irq);
+ return 0;
+}
+
+static int qti_can_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct qti_can *priv_data = spi_get_drvdata(spi);
+
+ disable_irq_wake(spi->irq);
+ qti_can_rx_message(priv_data);
+ return 0;
+}
+
+static const struct dev_pm_ops qti_can_dev_pm_ops = {
+ .suspend = qti_can_suspend,
+ .resume = qti_can_resume,
+};
+#endif
+
+static struct spi_driver qti_can_driver = {
+ .driver = {
+ .name = "qti-can",
+ .of_match_table = qti_can_match_table,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &qti_can_dev_pm_ops,
+#endif
+ },
+ .probe = qti_can_probe,
+ .remove = qti_can_remove,
+};
+module_spi_driver(qti_can_driver);
+
+MODULE_DESCRIPTION("QTI CAN controller module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/rh850.c b/drivers/net/can/spi/rh850.c
index d2b6e8caa112..b32ae2ddd41b 100644
--- a/drivers/net/can/spi/rh850.c
+++ b/drivers/net/can/spi/rh850.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/completion.h>
+#include <linux/irq.h>
#define DEBUG_RH850 0
#if DEBUG_RH850 == 1
@@ -1103,6 +1104,7 @@ static int rh850_probe(struct spi_device *spi)
int err, i;
struct rh850_can *priv_data;
struct device *dev;
+ u32 irq_type;
dev = &spi->dev;
dev_info(dev, "rh850_probe");
@@ -1134,8 +1136,11 @@ static int rh850_probe(struct spi_device *spi)
}
}
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (irq_type == IRQ_TYPE_NONE)
+ irq_type = IRQ_TYPE_EDGE_FALLING;
err = request_threaded_irq(spi->irq, NULL, rh850_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ irq_type | IRQF_ONESHOT,
"rh850", priv_data);
if (err) {
dev_err(dev, "Failed to request irq: %d", err);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index db1855b0e08f..59f891bebcc6 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1175,7 +1175,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
skb = alloc_can_skb(priv->netdev, &cf);
if (!skb) {
- stats->tx_dropped++;
+ stats->rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 4547a1b8b958..7677c745fb30 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -562,7 +562,7 @@ static void el3_common_remove (struct net_device *dev)
}
#ifdef CONFIG_EISA
-static int __init el3_eisa_probe (struct device *device)
+static int el3_eisa_probe(struct device *device)
{
short i;
int ioaddr, irq, if_port;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2839af00f20c..1c5f3b273e6a 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -907,7 +907,7 @@ static struct eisa_device_id vortex_eisa_ids[] = {
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
-static int __init vortex_eisa_probe(struct device *device)
+static int vortex_eisa_probe(struct device *device)
{
void __iomem *ioaddr;
struct eisa_device *edev;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 618d952c2984..2ef4b4e884ae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -829,7 +829,7 @@ static int xgbe_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int xgbe_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
@@ -868,7 +868,7 @@ static int xgbe_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c31e691d11fc..e8d31640058d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -604,6 +604,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+ CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index c153a1dc5ff7..480312105964 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -147,6 +147,7 @@ enum xgene_enet_rm {
#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
+#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eabc0171..9cc5daed13ed 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -250,39 +250,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- pktlen = info & LEN_MASK;
- stats->rx_packets++;
- stats->rx_bytes += pktlen;
- skb = rx_buff->skb;
- skb_put(skb, pktlen);
- skb->dev = ndev;
- skb->protocol = eth_type_trans(skb, ndev);
-
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
- dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
- /* Prepare the BD for next cycle */
- rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
- EMAC_BUFFER_SIZE);
- if (unlikely(!rx_buff->skb)) {
+ /* Prepare the BD for the next cycle. Call netif_receive_skb()
+ * only if a new skb was allocated and mapped, to avoid holes
+ * in the RX FIFO.
+ */
+ skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "cannot allocate skb\n");
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
- /* Because receive_skb is below, increment rx_dropped */
stats->rx_dropped++;
continue;
}
- /* receive_skb only if new skb was allocated to avoid holes */
- netif_receive_skb(skb);
-
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(&ndev->dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, addr)) {
if (net_ratelimit())
- netdev_err(ndev, "cannot dma map\n");
- dev_kfree_skb(rx_buff->skb);
+ netdev_err(ndev, "cannot map dma buffer\n");
+ dev_kfree_skb(skb);
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
+ stats->rx_dropped++;
continue;
}
+
+ /* unmap previously mapped skb */
+ dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+ pktlen = info & LEN_MASK;
+ stats->rx_packets++;
+ stats->rx_bytes += pktlen;
+ skb_put(rx_buff->skb, pktlen);
+ rx_buff->skb->dev = ndev;
+ rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+ netif_receive_skb(rx_buff->skb);
+
+ rx_buff->skb = skb;
dma_unmap_addr_set(rx_buff, addr, addr);
dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c31c7407b753..425dae560322 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -150,8 +150,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* Optional regulator for PHY */
priv->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(priv->regulator)) {
- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out_clk_disable;
+ }
dev_err(dev, "no regulator found\n");
priv->regulator = NULL;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 027705117086..af9ec57bbebf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -729,37 +729,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
struct net_device *ndev = priv->netdev;
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);
- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}
ring->c_index = c_index;
@@ -1229,6 +1225,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f28bf545d7f4..8ace6ecb5f79 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -638,7 +638,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
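
Editor's note: the bcmsysport rework above replaces the branchy "wrapped or not" computation with txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK. A standalone sketch of that wraparound-safe index delta follows; the mask value is illustrative, the point is that an unsigned subtraction followed by a mask needs no explicit wrap branch.

#include <stdio.h>

#define CONS_INDEX_MASK 0xffffu		/* hardware index wraps at MASK + 1 */

static unsigned int txbds_ready(unsigned int hw_c_index, unsigned int sw_c_index)
{
	return (hw_c_index - sw_c_index) & CONS_INDEX_MASK;
}

int main(void)
{
	/* no wrap: hardware moved from 100 to 110 */
	printf("%u\n", txbds_ready(110, 100));		/* 10 */
	/* wrap: hardware went 0xfffe -> 0x0004 */
	printf("%u\n", txbds_ready(0x0004, 0xfffe));	/* 6 */
	return 0;
}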
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e5911ccb2148..949a82458a29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2044,6 +2044,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
ETH_OVREHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
+ fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
@@ -3052,7 +3053,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- if (IS_PF(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
bnx2x_drv_pulse(bp);
@@ -3134,7 +3135,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->cnic_loaded = false;
/* Clear driver version indication in shmem */
- if (IS_PF(bp))
+ if (IS_PF(bp) && !BP_NOMCP(bp))
bnx2x_update_mng_version(bp);
/* Check if there are pending parity attentions. If there are - set
@@ -3942,15 +3943,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
+ u16 vlan_tci = 0;
#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
#endif
- tx_start_bd->vlan_or_ethertype =
- cpu_to_le16(ntohs(eth->h_proto));
+ /* Still need to consider the inband VLAN for enforcement */
+ if (__vlan_get_tag(skb, &vlan_tci)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+ } else {
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_INBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tci);
+ }
#ifndef BNX2X_STOP_ON_ERROR
- else
+ } else {
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
#endif
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index abb3ff6498dc..8ddb68a3fdb6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9570,6 +9570,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
do {
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+ /* If we read all 0xFFs, it means we are in a PCI error state and
+ * should bail out to avoid crashes on the adapter's FW reads.
+ */
+ if (bp->common.shmem_base == 0xFFFFFFFF) {
+ bp->flags |= NO_MCP_FLAG;
+ return -ENODEV;
+ }
+
if (bp->common.shmem_base) {
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if (val & SHR_MEM_VALIDITY_MB)
@@ -14214,7 +14223,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
BNX2X_ERR("IO slot reset --> driver unload\n");
/* MCP should have been reset; Need to wait for validity */
- bnx2x_init_shmem(bp);
+ if (bnx2x_init_shmem(bp)) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 v;
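
Editor's note: the bnx2x_init_shmem() change above detects a dead or error-recovering PCI device by the fact that reads come back as all-ones. A minimal sketch of that "device is gone" probe; read_reg() is a stub, not the driver's register accessor.

#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg(void)
{
	return 0xFFFFFFFFu;	/* simulate a surprise-removed device */
}

int main(void)
{
	uint32_t shmem_base = read_reg();

	if (shmem_base == 0xFFFFFFFFu) {
		fprintf(stderr, "device in PCI error state, skipping FW access\n");
		return 1;	/* bail out instead of touching firmware registers */
	}
	printf("shmem base: 0x%08x\n", shmem_base);
	return 0;
}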
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ea044bbcd384..3eebb57975e3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -29,7 +29,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
}
- if (vf_id >= bp->pf.max_vfs) {
+ if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ab53e0cfb4dc..1325825d5225 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8722,14 +8722,15 @@ static void tg3_free_consistent(struct tg3 *tp)
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
- tg3_full_lock(tp, 0);
+ /* tp->hw_stats can be referenced safely:
+ * 1. under rtnl_lock
+ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+ */
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
- tg3_full_unlock(tp);
}
/*
@@ -10051,6 +10052,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tw32(GRC_MODE, tp->grc_mode | val);
+ /* On some AMD platforms, MRRS is restricted to 4000 because of a
+ * south bridge limitation. As a workaround, the driver sets MRRS
+ * to 2048 instead of the default 4096.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+ val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+ tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+ }
+
/* Setup the timer prescalar register. Clock is always 66Mhz. */
val = tr32(GRC_MISC_CFG);
val &= ~0xff;
@@ -14153,7 +14164,7 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
struct tg3 *tp = netdev_priv(dev);
spin_lock_bh(&tp->lock);
- if (!tp->hw_stats) {
+ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
return stats;
@@ -14230,7 +14241,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
*/
if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5717 ||
- tg3_asic_rev(tp) == ASIC_REV_5719)
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 31c9f8295953..19532961e173 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -95,6 +95,7 @@
#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -280,6 +281,9 @@
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
/* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
+#define MAX_READ_REQ_SIZE_2048 0x00004000
+#define MAX_READ_REQ_MASK 0x00007000
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
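
Editor's note: the Dell 5762 MRRS workaround above is a plain read-modify-write of one register field: clear the field with its mask, then OR in the 2048-byte encoding. A standalone sketch of that masking arithmetic, using the constants from the tg3.h hunk; the starting register value is made up.

#include <stdint.h>
#include <stdio.h>

#define MAX_READ_REQ_MASK	0x00007000u
#define MAX_READ_REQ_SIZE_2048	0x00004000u

int main(void)
{
	uint32_t dev_status_ctrl = 0x00005231u;	/* pretend current register value */

	dev_status_ctrl &= ~MAX_READ_REQ_MASK;		/* clear the MRRS field */
	dev_status_ctrl |= MAX_READ_REQ_SIZE_2048;	/* select 2048 bytes */

	printf("new value: 0x%08x\n", dev_status_ctrl);	/* 0x00004231 */
	return 0;
}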
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0f6811860ad5..a36e38676640 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index cc1725616f9d..50747573f42e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2823,7 +2823,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_dma_failed;
+ goto lio_xmit_failed;
}
cmdsetup.s.gather = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cf61a5869c6e..de23f23b41de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6076,13 +6076,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
+ /* Disable FW_OK flag so that mbox commands with FW_OK flag set
+ * won't be sent while we are flashing the FW.
+ */
+ adap->flags &= ~FW_OK;
+
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
- return ret;
+ goto out;
ret = t4_load_fw(adap, fw_data, size);
if (ret < 0)
- return ret;
+ goto out;
/*
* Older versions of the firmware don't understand the new
@@ -6093,7 +6098,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
* its header flags to see if it advertises the capability.
*/
reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
- return t4_fw_restart(adap, mbox, reset);
+ ret = t4_fw_restart(adap, mbox, reset);
+
+ /* Grab potentially new Firmware Device Log parameters so we can see
+ * how healthy the new Firmware is. It's okay to contact the new
+ * Firmware for these parameters even though, as far as it's
+ * concerned, we've never said "HELLO" to it ...
+ */
+ (void)t4_init_devlog_params(adap);
+out:
+ adap->flags |= FW_OK;
+ return ret;
}
/**
@@ -7696,7 +7711,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
if (ret)
break;
- idx = (idx + 1) & UPDBGLARDPTR_M;
+
+ /* Bits 0-3 of UpDbgLaRdPtr can range from 0000 to 1001 to
+ * identify the 32-bit portion of the full 312-bit data
+ */
+ if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
+ idx = (idx & 0xff0) + 0x10;
+ else
+ idx++;
+ /* address can't exceed 0xfff */
+ idx &= UPDBGLARDPTR_M;
}
restart:
if (cfg & UPDBGLAEN_F) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index fa3786a9d30e..ec8ffd7eae33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2604,8 +2604,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
unsigned int ingpadboundary, ingpackboundary;
@@ -2614,9 +2614,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
@@ -2627,8 +2638,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 8966f3159bb2..3acde3b9b767 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1990,7 +1990,7 @@ SetMulticastFilter(struct net_device *dev)
static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-static int __init de4x5_eisa_probe (struct device *gendev)
+static int de4x5_eisa_probe(struct device *gendev)
{
struct eisa_device *edev;
u_long iobase;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 6d0c5d5eea6d..58c0fccdd8cb 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <net/ip.h>
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 458e2d97d096..ae8e4fc22e7b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3539,6 +3539,8 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
of_node_put(fep->phy_node);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 40071dad1c57..9c76f1a2f57b 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -382,7 +382,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
of_match_device(fsl_pq_mdio_match, &pdev->dev);
- const struct fsl_pq_mdio_data *data = id->data;
+ const struct fsl_pq_mdio_data *data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
struct device_node *tbi;
@@ -390,6 +390,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
struct mii_bus *new_bus;
int err;
+ if (!id) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
+
+ data = id->data;
+
dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7923bfdc9b30..901661149b44 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1375,9 +1375,11 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_init_addr_hash_table(priv);
- /* Insert receive time stamps into padding alignment bytes */
+ /* Insert receive time stamps into padding alignment bytes, plus
+ * 2 bytes of padding to ensure CPU alignment.
+ */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- priv->padding = 8;
+ priv->padding = 8 + DEFAULT_PADDING;
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index b40fba929d65..d540ee190038 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -314,11 +314,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
+ set_fipers(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
- set_fipers(etsects);
-
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 802d55457f19..b1a27aef4425 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -776,7 +776,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data)
*/
static int hns_xgmac_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ARRAY_SIZE(g_xgmac_stats_string);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a0332129970b..4b91eb70c683 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1000,8 +1000,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
cnt--;
return cnt;
- } else {
+ } else if (stringset == ETH_SS_STATS) {
return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+ } else {
+ return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index ae6e30d39f0f..3daf2d4a7ca0 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
};
#endif
-#ifdef CONFIG_EISA
static struct eisa_device_id hp100_eisa_tbl[] = {
{ "HWPF180" }, /* HP J2577 rev A */
{ "HWP1920" }, /* HP 27248B */
@@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
{ "" } /* Mandatory final entry ! */
};
MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
-#endif
-#ifdef CONFIG_PCI
static const struct pci_device_id hp100_pci_tbl[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
@@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
-#endif
static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
@@ -2842,8 +2838,7 @@ static void cleanup_dev(struct net_device *d)
free_netdev(d);
}
-#ifdef CONFIG_EISA
-static int __init hp100_eisa_probe (struct device *gendev)
+static int hp100_eisa_probe(struct device *gendev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
struct eisa_device *edev = to_eisa_device(gendev);
@@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
.remove = hp100_eisa_remove,
}
};
-#endif
-#ifdef CONFIG_PCI
static int hp100_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
.probe = hp100_pci_probe,
.remove = hp100_pci_remove,
};
-#endif
/*
* module section
@@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
err = hp100_isa_init();
if (err && err != -ENODEV)
goto out;
-#ifdef CONFIG_EISA
err = eisa_driver_register(&hp100_eisa_driver);
if (err && err != -ENODEV)
goto out2;
-#endif
-#ifdef CONFIG_PCI
err = pci_register_driver(&hp100_pci_driver);
if (err && err != -ENODEV)
goto out3;
-#endif
out:
return err;
out3:
-#ifdef CONFIG_EISA
eisa_driver_unregister (&hp100_eisa_driver);
out2:
-#endif
hp100_isa_cleanup();
goto out;
}
@@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
static void __exit hp100_module_exit(void)
{
hp100_isa_cleanup();
-#ifdef CONFIG_EISA
eisa_driver_unregister (&hp100_eisa_driver);
-#endif
-#ifdef CONFIG_PCI
pci_unregister_driver (&hp100_pci_driver);
-#endif
}
module_init(hp100_module_init)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c01c46..f301c03c527b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int n = 20;
+ bool __maybe_unused try_internal_clock = false;
DBG(dev, "reset" NL);
@@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev)
}
#ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
/*
* PPC460EX/GT Embedded Processor Advanced User's Manual
* section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev)
* of the EMAC. If none is present, select the internal clock
* (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
* After a soft reset, select the external clock.
+ *
+ * The AR8035-A PHY on the Meraki MR24 does not provide a TX clock if
+ * the ethernet cable is not attached. This causes the reset to time
+ * out and the PHY detection code in emac_init_phy() is unable to
+ * communicate with and detect the AR8035-A PHY. As a result, the emac
+ * driver bails out early and the user has no ethernet.
+ * In order to stay compatible with existing configurations, the
+ * driver will temporarily switch to the internal clock after
+ * the first reset fails.
*/
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: select internal loop clock before reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev)
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (!n && !try_internal_clock) {
+ /* first attempt has timed out. */
+ n = 20;
+ try_internal_clock = true;
+ goto do_retry;
+ }
+
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: restore external clock source after reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
SDR0_ETH_CFG_ECS << dev->cell_index, 0);
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2cd6e3..481e994490ce 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
- __E1000_DOWN
+ __E1000_DOWN,
+ __E1000_DISABLED
};
#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 068023595d84..2a1d4a9d3c19 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -940,7 +940,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
- struct e1000_adapter *adapter;
+ struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;
static int cards_found = 0;
@@ -950,6 +950,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
+ bool disable_dev = false;
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@@ -1250,11 +1251,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
- pci_disable_device(pdev);
+ if (!adapter || disable_dev)
+ pci_disable_device(pdev);
return err;
}
@@ -1272,6 +1275,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ bool disable_dev;
e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@@ -1290,9 +1294,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
}
/**
@@ -5135,7 +5141,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
return 0;
}
@@ -5179,6 +5186,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5253,7 +5264,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
e1000_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
/* Request a slot slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -5281,6 +5294,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
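
Editor's note: the e1000 changes above gate pci_disable_device() behind a test_and_set_bit() on a new __E1000_DISABLED flag so remove, shutdown and error-recovery paths cannot disable the device twice. A portable sketch of the same "at most once" guard using a C11 atomic flag; disable_device() and the caller names are stand-ins.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag disabled = ATOMIC_FLAG_INIT;

static void disable_device(const char *caller)
{
	/* test-and-set returns the previous value: only the first caller sees false */
	if (!atomic_flag_test_and_set(&disabled))
		printf("%s: disabling device\n", caller);
	else
		printf("%s: already disabled, skipping\n", caller);
}

int main(void)
{
	disable_device("remove");	/* disables */
	disable_device("shutdown");	/* skips */
	return 0;
}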
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 5205f1ebe381..20d8806d2bff 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
struct e1000_hw *hw = &adapter->hw;
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct sk_buff *skb = adapter->tx_hwtstamp_skb;
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
- skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ /* Clear the global tx_hwtstamp_skb pointer and force writes
+ * prior to notifying the stack of a Tx timestamp.
+ */
adapter->tx_hwtstamp_skb = NULL;
+ wmb(); /* force write prior to skb_tstamp_tx */
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -3526,6 +3532,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHz;
+ incvalue = INCVALUE_96MHz;
+ shift = INCVALUE_SHIFT_96MHz;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ break;
case e1000_pch_lpt:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
@@ -6583,12 +6595,17 @@ static int e1000e_pm_thaw(struct device *dev)
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ int rc;
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
- return __e1000_shutdown(pdev, false);
+ rc = __e1000_shutdown(pdev, false);
+ if (rc)
+ e1000e_pm_thaw(dev);
+
+ return rc;
}
static int e1000e_pm_resume(struct device *dev)
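
Editor's note: the e1000e timestamp fix above takes a local copy of the shared skb pointer, clears the global slot and forces the write (wmb()) before notifying the stack, so a concurrent transmit path that sees the slot empty may safely claim it. A portable C11 sketch of that publish-before-notify ordering using release/acquire atomics; the skb type and deliver() are stand-ins, not the driver's definitions.

#include <stdatomic.h>
#include <stdio.h>

struct skb { int id; };

static struct skb pending = { 42 };
static _Atomic(struct skb *) tx_hwtstamp_skb = &pending;

static void deliver(struct skb *skb)
{
	printf("timestamp delivered for skb %d\n", skb->id);
}

static void hwtstamp_work(void)
{
	struct skb *skb = atomic_load_explicit(&tx_hwtstamp_skb,
					       memory_order_acquire);
	if (!skb)
		return;

	/* Clear the shared slot before notifying anyone (wmb() in the driver). */
	atomic_store_explicit(&tx_hwtstamp_skb, NULL, memory_order_release);

	deliver(skb);
}

int main(void)
{
	hwtstamp_work();	/* delivers skb 42 */
	hwtstamp_work();	/* slot already empty, nothing to do */
	return 0;
}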
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2ce0eba5e040..38431b49020f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -983,7 +983,7 @@ static void fm10k_self_test(struct net_device *dev,
memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
- if (FM10K_REMOVED(hw)) {
+ if (FM10K_REMOVED(hw->hw_addr)) {
netif_err(interface, drv, dev,
"Interface removed - test blocked\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 488a50d59dca..3da1f206ff84 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1073,6 +1073,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
struct i40e_hw *hw = &np->vsi->back->hw;
u32 val;
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
+ if (hw->mac.type == I40E_MAC_X722) {
+ val = X722_EEPROM_SCOPE_LIMIT + 1;
+ return val;
+ }
val = (rd32(hw, I40E_GLPCI_LBARCTRL)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 6100cdd9ad13..dd4e6ea9e0e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
return ret_code;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 4f34e1b79705..ac92685dd4e5 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -5666,6 +5666,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
int id = port->id;
bool allmulti = dev->flags & IFF_ALLMULTI;
+retry:
mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -5673,9 +5674,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
/* Remove all port->id's mcast enries */
mvpp2_prs_mcast_del_all(priv, id);
- if (allmulti && !netdev_mc_empty(dev)) {
- netdev_for_each_mc_addr(ha, dev)
- mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+ if (!allmulti) {
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
+ allmulti = true;
+ goto retry;
+ }
+ }
}
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 4b62aa1f9ff8..6e5065f0907b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 150;
+ pdev->d3_delay = 200;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ddb5541882f5..bcfac000199e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -967,6 +967,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
+ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+ __func__, MLX4_EN_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+ __func__, MLX4_EN_MAX_COAL_PKTS);
+ return -ERANGE;
+ }
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1d4e2e054647..897d061e4f03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include <linux/export.h>
#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ if (!mlx4_qp_lookup(dev, rule->qpn)) {
+ mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+ ret = -EINVAL;
+ goto out;
+ }
+
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
list_for_each_entry(cur, &rule->list, list) {
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
- if (ret < 0) {
- mlx4_free_cmd_mailbox(dev, mailbox);
- return ret;
- }
+ if (ret < 0)
+ goto out;
+
size += ret;
}
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
}
}
+out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 10aa6544cf4d..607daaffae98 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -140,6 +140,9 @@ enum {
#define MLX4_EN_TX_COAL_PKTS 16
#define MLX4_EN_TX_COAL_TIME 0x10
+#define MLX4_EN_MAX_COAL_PKTS U16_MAX
+#define MLX4_EN_MAX_COAL_TIME U16_MAX
+
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
@@ -518,8 +521,8 @@ struct mlx4_en_priv {
u16 rx_usecs_low;
u32 pkt_rate_high;
u16 rx_usecs_high;
- u16 sample_interval;
- u16 adaptive_rx_coal;
+ u32 sample_interval;
+ u32 adaptive_rx_coal;
u32 msg_enable;
u32 loopback_ok;
u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 168823dde79f..62f1a3433a62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -280,6 +280,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
u64 in_param = 0;
int err;
+ if (!cnt)
+ return;
+
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, base_qpn);
set_param_h(&in_param, cnt);
@@ -378,6 +381,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
__mlx4_qp_free_icm(dev, qpn);
}
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+
+ spin_unlock(&qp_table->lock);
+ return qp;
+}
+
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -465,6 +481,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
}
if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+ mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
cmd->qp_context.qos_vport = params->qos_vport;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d1fc7fa87b05..e3080fbd9d00 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5040,6 +5040,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+ struct mlx4_vf_immed_vlan_work *work)
+{
+ ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+ ctx->qp_context.qos_vport = work->qos_vport;
+}
+
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
struct mlx4_vf_immed_vlan_work *work =
@@ -5144,11 +5151,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
qp->sched_queue & 0xC7;
upd_context->qp_context.pri_path.sched_queue |=
((work->qos & 0x7) << 3);
- upd_context->qp_mask |=
- cpu_to_be64(1ULL <<
- MLX4_UPD_QP_MASK_QOS_VPP);
- upd_context->qp_context.qos_vport =
- work->qos_vport;
+
+ if (dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_QOS_VPP)
+ update_qos_vpp(upd_context, work);
}
err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f5c1f4acc57b..7c42be586be8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -513,7 +513,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -523,18 +522,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask);
- err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
- if (err) {
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
- irq);
- goto err_clear_mask;
- }
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
return 0;
-
-err_clear_mask:
- free_cpumask_var(priv->irq_info[i].mask);
- return err;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
cmd.req.arg3 = 0;
if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
- netxen_issue_cmd(adapter, &cmd);
+ rcode = netxen_issue_cmd(adapter, &cmd);
if (rcode != NX_RCODE_SUCCESS)
return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f9640d5ce6ba..b4f3cb55605e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3850,7 +3850,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
struct list_head *head = &mbx->cmd_q;
struct qlcnic_cmd_args *cmd = NULL;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
while (!list_empty(head)) {
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
@@ -3861,7 +3861,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
}
static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
@@ -3897,12 +3897,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_del(&cmd->list);
mbx->num_cmds--;
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
@@ -3967,7 +3967,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
init_completion(&cmd->completion);
cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_add_tail(&cmd->list, &mbx->cmd_q);
mbx->num_cmds++;
@@ -3975,7 +3975,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
*timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
queue_work(mbx->work_q, &mbx->work);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return 0;
}
@@ -4071,15 +4071,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
spin_unlock_irqrestore(&mbx->aen_lock, flags);
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
if (list_empty(head)) {
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return;
}
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
mbx_ops->encode_cmd(adapter, cmd);
mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 509b596cf1e8..bd1ec70fb736 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
}
return -EIO;
}
- usleep_range(1000, 1500);
+ udelay(1200);
}
if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729ba2e..ffa6885acfc8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -127,6 +127,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
return 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index be258d90de9e..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_mpi_coredump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
/* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_reg_dump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 1ef03939d25f..c90ae4d4be7d 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
/* Allocate rx SKB if we don't have one available. */
if (!qca->rx_skb) {
- qca->rx_skb = netdev_alloc_skb(net_dev,
- net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+ net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
qca->rx_skb, qca->rx_skb->dev);
qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx_ni(qca->rx_skb);
- qca->rx_skb = netdev_alloc_skb(net_dev,
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
if (!qca->rx_buffer)
return -ENOBUFS;
- qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
kfree(qca->rx_buffer);
netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ef668d300800..d987d571fdd6 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 24155380e43c..8b4069ea52ce 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1387,7 +1387,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
- return RTL_R8(IBISR0) & 0x02;
+ return RTL_R8(IBISR0) & 0x20;
}
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
@@ -1395,7 +1395,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
}
@@ -4832,6 +4832,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
rtl_generic_op(tp, tp->pll_power_ops.up);
+
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
@@ -8411,12 +8414,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_msi_4;
}
+ pci_set_drvdata(pdev, dev);
+
rc = register_netdev(dev);
if (rc < 0)
goto err_out_cnt_5;
- pci_set_drvdata(pdev, dev);
-
netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 424d1dee55c9..afaf79b8761f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3222,7 +3222,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- dev_err(&ndev->dev, "failed to initialise MDIO\n");
+ dev_err(&pdev->dev, "failed to initialise MDIO\n");
goto out_release;
}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ab6051a43134..ccebf89aa1e4 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
len = (val & RCR_ENTRY_L2_LEN) >>
RCR_ENTRY_L2_LEN_SHIFT;
- len -= ETH_FCS_LEN;
+ append_size = len + ETH_HLEN + ETH_FCS_LEN;
addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
RCR_ENTRY_PKTBUFSZ_SHIFT];
off = addr & ~PAGE_MASK;
- append_size = rcr_size;
if (num_rcr == 1) {
int ptype;
@@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
else
skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
- append_size = len - skb->len;
+ append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index fc958067d10a..c69b0bdd891d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -280,6 +280,10 @@ struct cpsw_ss_regs {
/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
@@ -878,7 +882,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
- else if (phy->speed == 10)
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= BIT(18); /* In Band mode */
if (priv->rx_pause)
@@ -1126,11 +1131,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
switch (priv->version) {
case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
break;
}
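The two slave_write() calls above pack the transmit and receive FIFO block counts into a single MAX_BLKS register value: (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | CPSW_MAX_BLKS_RX evaluates to (15 << 4) | 5 = 0xf5. A minimal standalone sketch of that packing, reusing the constants introduced by the patch (the printed value is the only output):

#include <stdio.h>

#define CPSW_MAX_BLKS_TX	15
#define CPSW_MAX_BLKS_TX_SHIFT	4
#define CPSW_MAX_BLKS_RX	5

int main(void)
{
	/* TX block count is shifted into the upper nibble, RX count ORed into the lower one */
	unsigned int max_blks = (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
				CPSW_MAX_BLKS_RX;

	printf("MAX_BLKS register value: 0x%02x\n", max_blks);	/* 0xf5 */
	return 0;
}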
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index a274cd49afe9..399a89f30826 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -610,8 +610,8 @@ err_out_regions:
#ifdef CONFIG_PCI
if (pdev)
pci_release_regions(pdev);
-#endif
err_out:
+#endif
if (pdev)
pci_disable_device(pdev);
return rc;
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 49fe59b180a8..a75ce9051a7f 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -574,6 +574,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (s->par.bitrate <= 0)
+ return -EINVAL;
if (bi.data.calibrate > INT_MAX / s->par.bitrate)
return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
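The two added checks guard the expression bi.data.calibrate * s->par.bitrate: the first rejects a zero or negative bitrate (which would also make the division by bitrate undefined), the second bounds the multiplier by INT_MAX / bitrate so the product cannot overflow. A minimal standalone sketch of the same guard pattern, with hypothetical names in place of the driver's fields:

#include <limits.h>
#include <stdio.h>

/* Returns -1 on invalid input or overflow, otherwise calibrate * bitrate / 16. */
static int scaled_product(int calibrate, int bitrate)
{
	if (bitrate <= 0)
		return -1;	/* division below would be undefined */
	if (calibrate < 0 || calibrate > INT_MAX / bitrate)
		return -1;	/* calibrate * bitrate would overflow */
	return calibrate * bitrate / 16;
}

int main(void)
{
	printf("%d\n", scaled_product(100, 9600));	/* 60000 */
	printf("%d\n", scaled_product(1 << 30, 9600));	/* -1: rejected */
	return 0;
}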
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 95c0b45a68fb..313e006f74fe 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1381,8 +1381,8 @@ static int rr_close(struct net_device *dev)
rrpriv->info_dma);
rrpriv->info = NULL;
- free_irq(pdev->irq, dev);
spin_unlock_irqrestore(&rrpriv->lock, flags);
+ free_irq(pdev->irq, dev);
return 0;
}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index af827faec7fe..142015af43db 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -282,6 +282,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
success = true;
} else {
+ if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+ ipvlan->phy_dev->dev_addr))
+ skb->pkt_type = PACKET_OTHERHOST;
+
ret = RX_HANDLER_ANOTHER;
success = true;
}
@@ -353,6 +357,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
.flowi4_oif = dev->ifindex,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .flowi4_mark = skb->mark,
.daddr = ip4h->daddr,
.saddr = ip4h->saddr,
};
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 15bc7f9ea224..afd76e07088b 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -128,8 +128,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
data->regulator = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(data->regulator)) {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_out_free_mdiobus;
+ }
dev_info(&pdev->dev, "no regulator found\n");
} else {
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d0690433ee0..7d2cf015c5e7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -148,6 +148,12 @@ static inline int phy_aneg_done(struct phy_device *phydev)
if (phydev->drv->aneg_done)
return phydev->drv->aneg_done(phydev);
+ /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
+ * implement Clause 22 registers
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EINVAL;
+
return genphy_aneg_done(phydev);
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e2decf71c6d1..46448d7e3290 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2952,6 +2952,15 @@ ppp_connect_channel(struct channel *pch, int unit)
goto outl;
ppp_lock(ppp);
+ spin_lock_bh(&pch->downl);
+ if (!pch->chan) {
+ /* Don't connect unregistered channels */
+ spin_unlock_bh(&pch->downl);
+ ppp_unlock(ppp);
+ ret = -ENOTCONN;
+ goto outl;
+ }
+ spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index b7b859c3a0c7..583d50f80b24 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -638,6 +638,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppox))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
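The added sockaddr_len check makes pppoe_connect() reject undersized address buffers before any sockaddr_pppox fields are read. A minimal userspace sketch of the same length-validation idea, using sockaddr_in as a stand-in for sockaddr_pppox:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Reject short sockaddr buffers before touching family-specific fields. */
static int validate_sockaddr(const struct sockaddr *sa, socklen_t len)
{
	if (len < sizeof(struct sockaddr_in))
		return -1;	/* too short to be an AF_INET address */
	if (sa->sa_family != AF_INET)
		return -1;
	return 0;
}

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };

	printf("%d\n", validate_sockaddr((struct sockaddr *)&sin, sizeof(sin)));	/* 0 */
	printf("%d\n", validate_sockaddr((struct sockaddr *)&sin, 4));			/* -1 */
	return 0;
}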
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index f7e8c79349ad..12a627fcc02c 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -501,7 +501,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
po->chan.mtu = PPP_MRU;
- ip_rt_put(rt);
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 27ed25252aac..cfd81eb1b532 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
if(x < 0 || x > comp->rslot_limit)
goto bad;
+ /* Check if the cstate is initialized */
+ if (!comp->rstate[x].initialized)
+ goto bad;
+
comp->flags &=~ SLF_TOSS;
comp->recv_current = x;
} else {
@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
if (cs->cs_tcp.doff > 5)
memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+ cs->initialized = true;
/* Put headers back on packet
* Neither header checksum is recalculated
*/
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 61cd53838360..e74709e4b5dd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -247,6 +247,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+ const struct team_option_inst *needle)
+{
+ struct team_option_inst *opt_inst;
+
+ list_for_each_entry(opt_inst, opts, tmp_list)
+ if (opt_inst == needle)
+ return true;
+ return false;
+}
+
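__team_option_inst_tmp_find() above gives the netlink handler a way to ask whether an option instance is already queued on the temporary tmp_list, so a duplicate option sent by user space is skipped instead of being list_add()ed a second time (see the hunk in team_nl_cmd_options_set() further down). A minimal standalone sketch of that check-before-append idea, using a plain singly linked list instead of the kernel's list_head:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* True if needle is already linked into the list starting at head. */
static bool list_contains(const struct node *head, const struct node *needle)
{
	for (const struct node *n = head; n; n = n->next)
		if (n == needle)
			return true;
	return false;
}

/* Prepend needle only if it is not on the list yet. */
static struct node *add_unique(struct node *head, struct node *needle)
{
	if (list_contains(head, needle))
		return head;
	needle->next = head;
	return needle;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	struct node *head = NULL;

	head = add_unique(head, &a);
	head = add_unique(head, &b);
	head = add_unique(head, &a);	/* duplicate, ignored */

	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->id);	/* prints 2 then 1 */
	return 0;
}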
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1039,14 +1050,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;
- if (!team->dev->npinfo)
- return 0;
-
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
@@ -1060,6 +1068,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
return err;
}
+static int team_port_enable_netpoll(struct team_port *port)
+{
+ if (!port->team->dev->npinfo)
+ return 0;
+
+ return __team_port_enable_netpoll(port);
+}
+
static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;
@@ -1074,7 +1090,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}
@@ -1181,7 +1197,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1889,7 +1905,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -2380,7 +2396,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
@@ -2544,6 +2560,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
+
+ /* dumb/evil user-space can send us duplicate opt,
+ * keep only the last one
+ */
+ if (__team_option_inst_tmp_find(&opt_inst_list,
+ opt_inst))
+ continue;
+
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
@@ -2660,7 +2684,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1f6893ebce16..3a7286256db0 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@ config USB_NET_RNDIS_HOST
The protocol specification is incomplete, and is controlled by
(and for) Microsoft; it isn't an "Open" ecosystem or market.
+config USB_NET_CDC_SUBSET_ENABLE
+ tristate
+ depends on USB_NET_CDC_SUBSET
+
config USB_NET_CDC_SUBSET
tristate "Simple USB Network Links (CDC Ethernet subset)"
depends on USB_USBNET
@@ -413,6 +417,7 @@ config USB_NET_CDC_SUBSET
config USB_ALI_M5632
bool "ALi M5632 based 'USB 2.0 Data Link' cables"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option if you're using a host-to-host cable
based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@ config USB_ALI_M5632
config USB_AN2720
bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option if you're using a host-to-host cable
based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@ config USB_AN2720
config USB_BELKIN
bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
default y
help
Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@ config USB_BELKIN
config USB_ARMLINUX
bool "Embedded ARM Linux links (iPaq, ...)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
default y
help
Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@ config USB_ARMLINUX
config USB_EPSON2888
bool "Epson 2888 based firmware (DEVELOPMENT)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option to support the usb networking links used
by some sample firmware from Epson.
@@ -461,6 +470,7 @@ config USB_EPSON2888
config USB_KC2190
bool "KT Technology KC2190 based cables (InstaNet)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option if you're using a host-to-host cable
with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f04068dbe4..37fb46aee341 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
obj-$(CONFIG_USB_NET_NET1080) += net1080.o
obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
-obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o
+obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f9343bee1de3..f71abe50ea6f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -461,6 +461,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
@@ -650,6 +651,15 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
@@ -705,6 +715,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion AHS3 modem by GEMALTO */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* Telit modules */
USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1228d0da4075..c8e98c8e29fa 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -825,6 +825,9 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
+ /* Device-specific flags */
+ ctx->drvflags = drvflags;
+
/*
* Some Huawei devices have been observed to come out of reset in NDP32 mode.
* Let's check if this is the case, and set the device to NDP16 mode again if
@@ -873,9 +876,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* finish setting up the device specific data */
cdc_ncm_setup(dev);
- /* Device-specific flags */
- ctx->drvflags = drvflags;
-
/* Allocate the delayed NDP if needed. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
@@ -1069,6 +1069,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
u16 n = 0, index, ndplen;
u8 ready2send = 0;
u32 delayed_ndp_size;
+ size_t padding_count;
/* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
* accordingly. Otherwise, we should check here.
@@ -1225,11 +1226,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* a ZLP after full sized NTBs.
*/
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
- skb_out->len > ctx->min_tx_pkt)
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
- ctx->tx_max - skb_out->len);
- else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
+ skb_out->len > ctx->min_tx_pkt) {
+ padding_count = ctx->tx_max - skb_out->len;
+ memset(skb_put(skb_out, padding_count), 0, padding_count);
+ } else if (skb_out->len < ctx->tx_max &&
+ (skb_out->len % dev->maxpacket) == 0) {
*skb_put(skb_out, 1) = 0; /* force short packet */
+ }
/* set final frame length */
nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
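The reworked tail of cdc_ncm_fill_tx_frame() either pads the NTB out to ctx->tx_max or, when the frame would otherwise end exactly on a USB packet boundary, appends one zero byte so the transfer ends in a short packet rather than needing a separate zero-length packet. A minimal standalone sketch of that decision; the sizes passed in main() are made-up stand-ins for tx_max, min_tx_pkt and the bulk endpoint's maxpacket:

#include <stdbool.h>
#include <stdio.h>

/*
 * Decide how many padding bytes to append to a frame of length len.
 * send_zlp: the device advertises that it will send a ZLP itself.
 * Returns the number of bytes to append (may be 0).
 */
static unsigned int ntb_tail_padding(unsigned int len, bool send_zlp,
				     unsigned int tx_max,
				     unsigned int min_tx_pkt,
				     unsigned int maxpacket)
{
	if (!send_zlp && len > min_tx_pkt)
		return tx_max - len;	/* pad to a full-sized NTB */
	if (len < tx_max && (len % maxpacket) == 0)
		return 1;		/* force a short packet */
	return 0;
}

int main(void)
{
	printf("%u\n", ntb_tail_padding(1500, false, 16384, 512, 512));	/* pads to 16384 */
	printf("%u\n", ntb_tail_padding(1024, true, 16384, 512, 512));	/* 1 byte forces a short packet */
	return 0;
}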
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index ebdee8f01f65..a6d429950cb0 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -618,7 +618,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
offset += 0x100;
else
ret = -EINVAL;
- ret = lan78xx_read_raw_otp(dev, offset, length, data);
+ if (!ret)
+ ret = lan78xx_read_raw_otp(dev, offset, length, data);
}
return ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b0ea8dee5f06..8aaa09b3c753 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -631,6 +631,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
@@ -854,6 +855,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
id->driver_info = (unsigned long)&qmi_wwan_info;
}
+ /* There are devices where the same interface number can be
+ * configured as different functions. We should only bind to
+ * vendor specific functions when matching on interface number
+ */
+ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+ dev_dbg(&intf->dev,
+ "Rejecting interface number match for class %02x\n",
+ desc->bInterfaceClass);
+ return -ENODEV;
+ }
+
/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 89950f5cea71..b2c1a435357f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -506,6 +506,7 @@ enum rtl8152_flags {
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
@@ -4376,6 +4377,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ba21d072be31..6b4cc1c2e6b4 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -399,6 +399,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
+ peer->gso_max_size = dev->gso_max_size;
+ peer->gso_max_segs = dev->gso_max_segs;
+
err = register_netdevice(peer);
put_net(net);
net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8dfc75250583..d01285250204 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -556,7 +556,12 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
hdr = skb_vnet_hdr(skb);
sg_init_table(rq->sg, 2);
sg_set_buf(rq->sg, hdr, vi->hdr_len);
- skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+
+ err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+ if (unlikely(err < 0)) {
+ dev_kfree_skb(skb);
+ return err;
+ }
err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
if (err < 0)
@@ -858,7 +863,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
- unsigned num_sg;
+ int num_sg;
unsigned hdr_len = vi->hdr_len;
bool can_push;
@@ -911,11 +916,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (can_push) {
__skb_push(skb, hdr_len);
num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
/* Pull header back to avoid skew in tx bytes calculations. */
__skb_pull(skb, hdr_len);
} else {
sg_set_buf(sq->sg, hdr, hdr_len);
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
+ num_sg++;
}
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 82bf85ae5d08..419c045d0752 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2789,6 +2789,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
/* we need to enable NAPI, otherwise dev_close will deadlock */
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
+ /*
+ * Need to clear the quiesce bit to ensure that vmxnet3_close
+ * can quiesce the device properly
+ */
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
dev_close(adapter->netdev);
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ac945f8781ac..d3d59122a357 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -550,13 +550,15 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
- if (!IS_ERR(neigh))
+ if (!IS_ERR(neigh)) {
ret = dst_neigh_output(dst, neigh, skb);
+ rcu_read_unlock_bh();
+ return ret;
+ }
rcu_read_unlock_bh();
err:
- if (unlikely(ret < 0))
- vrf_tx_error(skb->dev, skb);
+ vrf_tx_error(skb->dev, skb);
return ret;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index dab3bf6649e6..c41378214ede 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -962,7 +962,7 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
/* Don't migrate static entries, drop packets */
- if (f->state & NUD_NOARP)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
return true;
if (net_ratelimit())
@@ -2834,6 +2834,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
needed_headroom = lowerdev->hard_header_len;
}
+ if (lowerdev) {
+ dev->gso_max_size = lowerdev->gso_max_size;
+ dev->gso_max_segs = lowerdev->gso_max_segs;
+ }
+
if (conf->mtu) {
err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
if (err)
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 0d7645581f91..4842344a96f1 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
proto->restart_counter--;
- } else
+ } else if (netif_carrier_ok(proto->dev))
+ ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+ 0, NULL);
+ else
ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
0, NULL);
break;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index db363856e0b5..2b064998915f 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -347,6 +347,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
card->rambase == NULL) {
pr_err("ioremap() failed\n");
pc300_pci_remove_one(pdev);
+ return -ENOMEM;
}
/* PLX PCI 9050 workaround for local configuration register read bug */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ece0eee5cf7c..784a11a74443 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1303,7 +1303,7 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
int ret;
struct ath10k_fw_file *fw_file;
- if (!ar->is_bmi && QCA_REV_WCN3990(ar)) {
+ if (!ar->is_bmi) {
fw_file = &ar->normal_mode_fw.fw_file;
fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV;
fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 42aab9b86af3..0836a81b93e0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2226,6 +2226,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
+ struct ath10k_vif *arvif;
+
+ /* Just check for the first vif alone, as all the vifs will be
+ * sharing the same channel and if the channel is disabled, all the
+ * vifs will share the same 'is_started' state.
+ */
+ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+ if (!arvif->is_started)
+ return -EINVAL;
ieee80211_radar_detected(ar->hw);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 635b8281b055..184da610f9da 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -564,6 +564,11 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
return IEEE80211_TKIP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_HDR_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
@@ -589,6 +594,11 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return IEEE80211_TKIP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
@@ -942,7 +952,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
fill_datapath_stats(ar, status);
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -1041,9 +1051,21 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
hdr = (void *)msdu->data;
/* Tail */
- if (status->flag & RX_FLAG_IV_STRIPPED)
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ } else {
+ /* MIC */
+ if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ skb_trim(msdu, msdu->len - 8);
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED &&
+ enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ }
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
@@ -1065,7 +1087,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
- const u8 first_hdr[64])
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
@@ -1073,6 +1096,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
int l3_pad_bytes;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1101,6 +1125,14 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1161,6 +1193,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
u8 sa[ETH_ALEN];
int l3_pad_bytes;
struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1189,6 +1222,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1202,12 +1243,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
- const u8 first_hdr[64])
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
@@ -1223,6 +1266,14 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
@@ -1257,13 +1308,15 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
is_decrypted);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
+ enctype);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
- ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
+ enctype);
break;
}
}
@@ -1306,7 +1359,8 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ bool fill_crypt_header)
{
struct sk_buff *first;
struct sk_buff *last;
@@ -1316,7 +1370,6 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
u8 *qos;
- size_t hdr_len;
bool has_fcs_err;
bool has_crypto_err;
bool has_tkip_err;
@@ -1341,15 +1394,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
* decapped header. It'll be used for undecapping of each MSDU.
*/
hdr = (void *)rxd->rx_hdr_status;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(first_hdr, hdr, hdr_len);
+ memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
/* Each A-MSDU subframe will use the original header as the base and be
* reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
*/
hdr = (void *)first_hdr;
- qos = ieee80211_get_qos_ctl(hdr);
- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
@@ -1396,9 +1451,14 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
status->flag |= RX_FLAG_DECRYPTED;
if (likely(!is_mgmt))
- status->flag |= RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
-}
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypt_header)
+ status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
skb_queue_walk(amsdu, msdu) {
ath10k_htt_rx_h_csum_offload(msdu);
@@ -1414,6 +1474,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
if (is_mgmt)
continue;
+ if (fill_crypt_header)
+ continue;
+
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
@@ -1424,6 +1487,9 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
struct ieee80211_rx_status *status)
{
struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+
+ first_subframe = skb_peek(amsdu);
while ((msdu = __skb_dequeue(amsdu))) {
/* Setup per-MSDU flags */
@@ -1432,6 +1498,13 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
else
status->flag |= RX_FLAG_AMSDU_MORE;
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ status->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+
ath10k_process_rx(ar, status, msdu);
}
}
@@ -1574,7 +1647,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
return num_msdus;
@@ -1913,7 +1986,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
break;
case -EAGAIN:
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 8d382f12b5fd..f02c1b148545 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5004,6 +5004,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
+ if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar)) {
+ ret = ath10k_wmi_csa_offload(ar, arvif->vdev_id, true);
+ if (ret) {
+ ath10k_err(ar, "CSA offload failed for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+ }
+
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
list_add(&arvif->list, &ar->arvifs);
@@ -5216,6 +5225,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
+ if ((arvif->vdev_type == WMI_VDEV_TYPE_STA) && QCA_REV_WCN3990(ar))
+ ath10k_wmi_csa_offload(ar, arvif->vdev_id, false);
+
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
arvif->vdev_id);
@@ -5665,6 +5677,22 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
arvif->vdev_id, ret);
}
+static void ath10k_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ mutex_lock(&ar->conf_mutex);
+ memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ arvif->gtk_rekey_data.replay_ctr =
+ be64_to_cpup((__be64 *)data->replay_ctr);
+ arvif->gtk_rekey_data.valid = true;
+ mutex_unlock(&ar->conf_mutex);
+}
+
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -5913,9 +5941,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
- changed & IEEE80211_RC_NSS_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -6151,6 +6178,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
+ if (sta->tdls) {
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+ sta,
+ WMI_TDLS_PEER_STATE_TEARDOWN);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+ sta->addr,
+ WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+ }
+
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -7174,7 +7211,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
lockdep_assert_held(&ar->data_lock);
WARN_ON(ctx && vifs);
- WARN_ON(vifs && n_vifs != 1);
+ WARN_ON(vifs && !n_vifs);
/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
* on a linked list now. Doing a lookup peer -> vif -> chanctx for each
@@ -7599,6 +7636,7 @@ static const struct ieee80211_ops ath10k_ops = {
.bss_info_changed = ath10k_bss_info_changed,
.hw_scan = ath10k_hw_scan,
.cancel_hw_scan = ath10k_cancel_hw_scan,
+ .set_rekey_data = ath10k_set_rekey_data,
.set_key = ath10k_set_key,
.set_default_unicast_key = ath10k_set_default_unicast_key,
.sta_state = ath10k_sta_state,
@@ -7634,7 +7672,6 @@ static const struct ieee80211_ops ath10k_ops = {
.suspend = ath10k_wow_op_suspend,
.resume = ath10k_wow_op_resume,
.set_wakeup = ath10k_wow_op_set_wakeup,
- .set_rekey_data = ath10k_wow_op_set_rekey_data,
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
@@ -8308,6 +8345,7 @@ err_free:
void ath10k_mac_unregister(struct ath10k *ar)
{
+ ath10k_wow_deinit(ar);
ieee80211_unregister_hw(ar->hw);
if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bb711b525af8..5499bd2712e4 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -252,6 +252,9 @@ enum htt_rx_mpdu_encrypt_type {
HTT_RX_MPDU_ENCRYPT_WAPI = 5,
HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
HTT_RX_MPDU_ENCRYPT_NONE = 7,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
};
#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index ed85f938e3c0..1a067a4ece4d 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -137,6 +137,13 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
return ret;
}
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION,
+ ar->normal_mode_fw.fw_file.wmi_op_version);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
return cfg80211_testmode_reply(skb);
}
@@ -174,8 +181,15 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
static int ath10k_tm_fetch_firmware(struct ath10k *ar)
{
struct ath10k_fw_components *utf_mode_fw;
+ struct ath10k_fw_file *fw_file;
int ret;
+ if (!ar->is_bmi) {
+ fw_file = &ar->testmode.utf_mode_fw.fw_file;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_TLV;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ return 0;
+ }
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
&ar->testmode.utf_mode_fw.fw_file);
if (ret == 0) {
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
index ba81bf66ce85..191a8f34c8ea 100644
--- a/drivers/net/wireless/ath/ath10k/testmode_i.h
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -33,6 +33,7 @@ enum ath10k_tm_attr {
ATH10K_TM_ATTR_WMI_CMDID = 3,
ATH10K_TM_ATTR_VERSION_MAJOR = 4,
ATH10K_TM_ATTR_VERSION_MINOR = 5,
+ ATH10K_TM_ATTR_WMI_OP_VERSION = 6,
/* keep last */
__ATH10K_TM_ATTR_AFTER_LAST,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 06fb7596988d..468ad47f0298 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -211,6 +211,8 @@ struct wmi_ops {
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
+ struct sk_buff *(*gen_csa_offload)(struct ath10k *ar,
+ u32 vdev_id, bool enable);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1493,6 +1495,23 @@ ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
}
static inline int
+ath10k_wmi_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_csa_offload)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_csa_offload(ar, vdev_id, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->csa_offload_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
struct ath10k_wmi *wmi = &ar->wmi;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 2dc2b5360ee8..3d323f3e73af 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3079,6 +3079,37 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
}
static struct sk_buff *
+ath10k_wmi_tlv_op_gen_csa_offload(struct ath10k *ar, u32 vdev_id, bool enable)
+{
+ struct wmi_csa_offload_enable_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd) + sizeof(*tlv);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ if (enable)
+ cmd->csa_offload_enable |=
+ __cpu_to_le32(WMI_CSA_OFFLOAD_ENABLE);
+ else
+ cmd->csa_offload_enable |=
+ __cpu_to_le32(WMI_CSA_OFFLOAD_DISABLE);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi CSA offload for vdev: %d\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_gtk_offload(struct ath10k *ar, struct ath10k_vif *arvif)
{
struct wmi_tlv_gtk_offload_cmd *cmd;
@@ -3123,13 +3154,14 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar,
void *ptr;
int i;
struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload;
+ struct wmi_ns_arp_offload_req *ns = &arvif->ns_offload;
struct wmi_ns_offload *ns_tuple;
struct wmi_arp_offload *arp_tuple;
len = sizeof(*cmd) + sizeof(*tlv) +
- sizeof(*tlv) + WMI_MAX_NS_OFFLOADS *
+ sizeof(*tlv) + WMI_NS_ARP_OFFLOAD *
(sizeof(struct wmi_ns_offload) + sizeof(*tlv)) +
- sizeof(*tlv) + WMI_MAX_ARP_OFFLOADS *
+ sizeof(*tlv) + WMI_NS_ARP_OFFLOAD *
(sizeof(struct wmi_arp_offload) + sizeof(*tlv));
skb = ath10k_wmi_alloc_skb(ar, len);
@@ -3147,33 +3179,49 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar,
ptr += (sizeof(*tlv) + sizeof(*cmd));
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
- tlv->len = __cpu_to_le16(WMI_MAX_NS_OFFLOADS *
+ tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD *
(sizeof(struct wmi_ns_offload) + sizeof(*tlv)));
ptr += sizeof(*tlv);
tlv = ptr;
- for (i = 0; i < WMI_MAX_NS_OFFLOADS; i++) {
+ for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) {
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE);
tlv->len = __cpu_to_le16(sizeof(struct wmi_ns_offload));
ns_tuple = (struct wmi_ns_offload *)tlv->value;
- ns_tuple->flags |= __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE);
+ if (ns->enable_offload) {
+ ns_tuple->flags |=
+ __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID);
+ if (ns->info.target_addr_valid.s6_addr[i]) {
+ memcpy(&ns_tuple->target_ipaddr[0],
+ &ns->info.target_addr[i],
+ sizeof(struct in6_addr));
+ }
+ memcpy(&ns_tuple->solicitation_ipaddr,
+ &ns->info.self_addr[i], sizeof(struct in6_addr));
+ if (ns->info.target_ipv6_ac.s6_addr[i] == IPV6_ADDR_ANY)
+ ns_tuple->flags |=
+ __cpu_to_le32(WMI_NSOFF_IPV6_ANYCAST);
+ } else {
+ ns_tuple->flags |=
+ __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE);
+ }
ptr += (sizeof(*tlv) + sizeof(struct wmi_ns_offload));
tlv = ptr;
}
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
- tlv->len = __cpu_to_le16(WMI_MAX_ARP_OFFLOADS *
+ tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD *
(sizeof(struct wmi_arp_offload) + sizeof(*tlv)));
ptr += sizeof(*tlv);
tlv = ptr;
- for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) {
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE);
tlv->len = __cpu_to_le16(sizeof(struct wmi_arp_offload));
arp_tuple = (struct wmi_arp_offload *)tlv->value;
if (arp->enable_offload && (i == 0)) {
arp_tuple->flags |=
- __cpu_to_le32(WMI_ARPOFF_FLAGS_VALID);
+ __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID);
memcpy(&arp_tuple->target_ipaddr,
&arp->params.ipv4_addr,
sizeof(arp_tuple->target_ipaddr));
@@ -3878,6 +3926,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
.gen_set_arp_ns_offload = ath10k_wmi_tlv_op_gen_set_arp_ns_offload,
.gen_gtk_offload = ath10k_wmi_op_gen_gtk_offload,
+ .gen_csa_offload = ath10k_wmi_tlv_op_gen_csa_offload,
.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 57b81b8bae82..4892c7d3cce3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <net/mac80211.h>
#include <linux/ipv6.h>
+#include <net/ipv6.h>
#include <linux/in.h>
/*
@@ -2887,13 +2888,12 @@ struct wmi_start_scan_common {
} __packed;
/* ARP-NS offload data structure */
-#define WMI_NSOFF_MAX_TARGET_IPS 2
-#define WMI_MAX_NS_OFFLOADS 2
-#define WMI_MAX_ARP_OFFLOADS 2
-#define WMI_ARPOFF_FLAGS_VALID BIT(0)
+#define WMI_NS_ARP_OFFLOAD 2
+#define WMI_ARP_NS_OFF_FLAGS_VALID BIT(0)
#define WMI_IPV4_ARP_REPLY_OFFLOAD 0
#define WMI_ARP_NS_OFFLOAD_DISABLE 0
#define WMI_ARP_NS_OFFLOAD_ENABLE 1
+#define WMI_NSOFF_IPV6_ANYCAST BIT(3)
struct wmi_ns_offload_info {
struct in6_addr src_addr;
@@ -2902,7 +2902,7 @@ struct wmi_ns_offload_info {
struct wmi_mac_addr self_macaddr;
u8 src_ipv6_addr_valid;
struct in6_addr target_addr_valid;
- struct in6_addr target_addr_ac_type;
+ struct in6_addr target_ipv6_ac;
u8 slot_idx;
} __packed;
@@ -2914,13 +2914,13 @@ struct wmi_ns_arp_offload_req {
struct in_addr ipv4_addr;
struct in6_addr ipv6_addr;
} params;
- struct wmi_ns_offload_info offload_info;
+ struct wmi_ns_offload_info info;
struct wmi_mac_addr bssid;
} __packed;
struct wmi_ns_offload {
__le32 flags;
- struct in6_addr target_ipaddr[WMI_NSOFF_MAX_TARGET_IPS];
+ struct in6_addr target_ipaddr[WMI_NS_ARP_OFFLOAD];
struct in6_addr solicitation_ipaddr;
struct in6_addr remote_ipaddr;
struct wmi_mac_addr target_mac;
@@ -5088,7 +5088,8 @@ enum wmi_10_4_vdev_param {
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
-#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 74a9206c9f12..2280f47dc227 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -17,6 +17,7 @@
#include "mac.h"
#include <net/mac80211.h>
+#include <net/addrconf.h>
#include "hif.h"
#include "core.h"
#include "debug.h"
@@ -232,6 +233,116 @@ static int ath10k_wow_wakeup(struct ath10k *ar)
}
static int
+ath10k_wow_fill_vdev_ns_offload_struct(struct ath10k_vif *arvif,
+ bool enable_offload)
+{
+ struct in6_addr addr[TARGET_NUM_STATIONS];
+ struct wmi_ns_arp_offload_req *ns;
+ struct wireless_dev *wdev;
+ struct inet6_dev *in6_dev;
+ struct in6_addr addr_type;
+ struct inet6_ifaddr *ifa;
+ struct ifacaddr6 *ifaca;
+ struct list_head *addr_list;
+ u32 scope, count = 0;
+ int i;
+
+ ns = &arvif->ns_offload;
+ if (!enable_offload) {
+ ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD);
+ ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE);
+ return 0;
+ }
+
+ wdev = ieee80211_vif_to_wdev(arvif->vif);
+ if (!wdev)
+ return -ENODEV;
+
+ in6_dev = __in6_dev_get(wdev->netdev);
+ if (!in6_dev)
+ return -ENODEV;
+
+ memset(&addr, 0, TARGET_NUM_STATIONS * sizeof(struct in6_addr));
+ memset(&addr_type, 0, sizeof(struct in6_addr));
+
+ /* Unicast Addresses */
+ read_lock_bh(&in6_dev->lock);
+ list_for_each(addr_list, &in6_dev->addr_list) {
+ if (count >= TARGET_NUM_STATIONS) {
+ read_unlock_bh(&in6_dev->lock);
+ return -EINVAL;
+ }
+
+ ifa = list_entry(addr_list, struct inet6_ifaddr, if_list);
+ if (ifa->flags & IFA_F_DADFAILED)
+ continue;
+ scope = ipv6_addr_src_scope(&ifa->addr);
+ switch (scope) {
+ case IPV6_ADDR_SCOPE_GLOBAL:
+ case IPV6_ADDR_SCOPE_LINKLOCAL:
+ memcpy(&addr[count], &ifa->addr.s6_addr,
+ sizeof(ifa->addr.s6_addr));
+ addr_type.s6_addr[count] = IPV6_ADDR_UNICAST;
+ count += 1;
+ break;
+ }
+ }
+
+ /* Anycast Addresses */
+ for (ifaca = in6_dev->ac_list; ifaca; ifaca = ifaca->aca_next) {
+ if (count >= TARGET_NUM_STATIONS) {
+ read_unlock_bh(&in6_dev->lock);
+ return -EINVAL;
+ }
+
+ scope = ipv6_addr_src_scope(&ifaca->aca_addr);
+ switch (scope) {
+ case IPV6_ADDR_SCOPE_GLOBAL:
+ case IPV6_ADDR_SCOPE_LINKLOCAL:
+ memcpy(&addr[count], &ifaca->aca_addr,
+ sizeof(ifaca->aca_addr));
+ addr_type.s6_addr[count] = IPV6_ADDR_ANY;
+ count += 1;
+ break;
+ }
+ }
+ read_unlock_bh(&in6_dev->lock);
+
+ /* Filling up the request structure
+ * Filling the self_addr with solicited address
+ * A Solicited-Node multicast address is created by
+ * taking the last 24 bits of a unicast or anycast
+ * address and appending them to the prefix
+ *
+ * FF02:0000:0000:0000:0000:0001:FFXX:XXXX
+ *
+ * where XX:XXXX are the unicast/anycast bits
+ */
+ for (i = 0; i < count; i++) {
+ ns->info.self_addr[i].s6_addr[0] = 0xFF;
+ ns->info.self_addr[i].s6_addr[1] = 0x02;
+ ns->info.self_addr[i].s6_addr[11] = 0x01;
+ ns->info.self_addr[i].s6_addr[12] = 0xFF;
+ ns->info.self_addr[i].s6_addr[13] = addr[i].s6_addr[13];
+ ns->info.self_addr[i].s6_addr[14] = addr[i].s6_addr[14];
+ ns->info.self_addr[i].s6_addr[15] = addr[i].s6_addr[15];
+ ns->info.slot_idx = i;
+ memcpy(&ns->info.target_addr[i], &addr[i],
+ sizeof(struct in6_addr));
+ ns->info.target_addr_valid.s6_addr[i] = 1;
+ ns->info.target_ipv6_ac.s6_addr[i] = addr_type.s6_addr[i];
+ memcpy(&ns->params.ipv6_addr, &ns->info.target_addr[i],
+ sizeof(struct in6_addr));
+ }
+
+ ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD);
+ ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE);
+ ns->num_ns_offload_count = __cpu_to_le16(count);
+
+ return 0;
+}
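The loop above derives, for every collected unicast/anycast address, the matching solicited-node multicast address by keeping the last 24 bits of the address and prepending the ff02::1:ff00:0/104 prefix, as the comment describes. A minimal standalone sketch of that derivation using the regular userspace in6_addr type rather than the WMI structures:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Build the solicited-node multicast address ff02::1:ffXX:XXXX for addr. */
static void solicited_node_addr(const struct in6_addr *addr,
				struct in6_addr *snm)
{
	memset(snm, 0, sizeof(*snm));
	snm->s6_addr[0]  = 0xff;
	snm->s6_addr[1]  = 0x02;
	snm->s6_addr[11] = 0x01;
	snm->s6_addr[12] = 0xff;
	snm->s6_addr[13] = addr->s6_addr[13];	/* last 24 bits of addr */
	snm->s6_addr[14] = addr->s6_addr[14];
	snm->s6_addr[15] = addr->s6_addr[15];
}

int main(void)
{
	struct in6_addr addr, snm;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "fe80::1234:56ff:fe78:9abc", &addr);
	solicited_node_addr(&addr, &snm);
	printf("%s\n", inet_ntop(AF_INET6, &snm, buf, sizeof(buf)));
	/* prints ff02::1:ff78:9abc */
	return 0;
}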
+
+static int
ath10k_wow_fill_vdev_arp_offload_struct(struct ath10k_vif *arvif,
bool enable_offload)
{
@@ -291,6 +402,13 @@ static int ath10k_wow_enable_ns_arp_offload(struct ath10k *ar, bool offload)
return ret;
}
+ ret = ath10k_wow_fill_vdev_ns_offload_struct(arvif, offload);
+ if (ret) {
+ ath10k_err(ar, "NS-offload config failed, vdev: %d\n",
+ arvif->vdev_id);
+ return ret;
+ }
+
ret = ath10k_wmi_set_arp_ns_offload(ar, arvif);
if (ret) {
ath10k_err(ar, "failed to send offload cmd, vdev: %d\n",
@@ -327,22 +445,6 @@ static int ath10k_config_wow_listen_interval(struct ath10k *ar)
return 0;
}
-void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
-{
- struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
-
- mutex_lock(&ar->conf_mutex);
- memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN);
- memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN);
- arvif->gtk_rekey_data.replay_ctr =
- cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
- arvif->gtk_rekey_data.valid = true;
- mutex_unlock(&ar->conf_mutex);
-}
-
static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload)
{
struct ath10k_vif *arvif;
@@ -391,6 +493,13 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto exit;
}
+ ret = ath10k_wow_cleanup(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
ret = ath10k_wow_config_gtk_offload(ar, true);
if (ret) {
ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret);
@@ -403,18 +512,11 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto disable_gtk_offload;
}
- ret = ath10k_wow_cleanup(ar);
- if (ret) {
- ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
- ret);
- goto disable_ns_arp_offload;
- }
-
ret = ath10k_wow_set_wakeups(ar, wowlan);
if (ret) {
ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
ret);
- goto cleanup;
+ goto disable_ns_arp_offload;
}
ret = ath10k_config_wow_listen_interval(ar);
@@ -471,6 +573,7 @@ static void ath10k_wow_op_report_wakeup_reason(struct ath10k *ar)
struct cfg80211_wowlan_wakeup *wakeup = &ar->wow.wakeup;
struct ath10k_vif *arvif;
+ memset(wakeup, 0, sizeof(struct cfg80211_wowlan_wakeup));
switch (ar->wow.wakeup_reason) {
case WOW_REASON_UNSPECIFIED:
wakeup = NULL;
@@ -488,6 +591,7 @@ static void ath10k_wow_op_report_wakeup_reason(struct ath10k *ar)
wakeup->gtk_rekey_failure = true;
break;
}
+ ar->wow.wakeup_reason = WOW_REASON_UNSPECIFIED;
if (wakeup) {
wakeup->pattern_idx = -1;
@@ -575,8 +679,15 @@ int ath10k_wow_init(struct ath10k *ar)
ar->wow.wowlan_support = ath10k_wowlan_support;
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
-
- device_set_wakeup_capable(ar->dev, true);
+ device_init_wakeup(ar->dev, true);
return 0;
}
+
+void ath10k_wow_deinit(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features) &&
+ test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map))
+ device_init_wakeup(ar->dev, false);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
index b53211584052..2ca4ba4848c9 100644
--- a/drivers/net/wireless/ath/ath10k/wow.h
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -27,13 +27,11 @@ struct ath10k_wow {
#ifdef CONFIG_PM
int ath10k_wow_init(struct ath10k *ar);
+void ath10k_wow_deinit(struct ath10k *ar);
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath10k_wow_op_resume(struct ieee80211_hw *hw);
void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
-void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data);
#else
static inline int ath10k_wow_init(struct ath10k *ar)
@@ -41,5 +39,8 @@ static inline int ath10k_wow_init(struct ath10k *ar)
return 0;
}
+void ath10k_wow_deinit(struct ath10k *ar)
+{
+}
#endif /* CONFIG_PM */
#endif /* _WOW_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 654a1e33f827..7c5f189cace7 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
}
for (i = 0; i < eesize; ++i) {
- AR5K_EEPROM_READ(i, val);
+ if (!ath5k_hw_nvram_read(ah, i, &val)) {
+ ret = -EIO;
+ goto freebuf;
+ }
buf[i] = val;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 41382f89abe1..4435c7bbb625 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1595,6 +1595,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
int count = 50;
u32 reg, last_val;
+ /* Check if chip failed to wake up */
+ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+ return false;
+
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
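
The added check treats a readback of 0xdeadbeef from AR_CFG as a sign that the chip or bus did not wake up and returns early, before the hang detectors run. A simplified, self-contained sketch of that idiom; fake_reg_read() and the constant stand in for the real register accessors:

/* Poison-readback check before trusting further register reads. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEAD_READ 0xdeadbeefu  /* value returned when the target is down */

/* Hypothetical accessor standing in for REG_READ(ah, AR_CFG). */
static uint32_t fake_reg_read(bool chip_awake)
{
	return chip_awake ? 0x00000014u : DEAD_READ;
}

static bool chip_alive(bool chip_awake)
{
	/* Bail out early instead of mis-reading hang-detection registers. */
	if (fake_reg_read(chip_awake) == DEAD_READ)
		return false;
	return true;
}

int main(void)
{
	printf("awake:  %s\n", chip_alive(true) ? "alive" : "dead");
	printf("asleep: %s\n", chip_alive(false) ? "alive" : "dead");
	return 0;
}
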
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 213569d384e7..5cde46c82a03 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain)
EXPORT_SYMBOL(ath_is_49ghz_allowed);
/* Frequency is one where radar detection is required */
-static bool ath_is_radar_freq(u16 center_freq)
+static bool ath_is_radar_freq(u16 center_freq,
+ struct ath_regulatory *reg)
{
+ if (reg->country_code == CTRY_INDIA)
+ return (center_freq >= 5500 && center_freq <= 5700);
return (center_freq >= 5260 && center_freq <= 5720);
}
@@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ieee80211_channel *ch)
{
- if (ath_is_radar_freq(ch->center_freq) ||
+ if (ath_is_radar_freq(ch->center_freq, reg) ||
(ch->flags & IEEE80211_CHAN_RADAR))
return;
@@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
}
}
-/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
-static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
+/* Always apply Radar/DFS rules on freq range 5260 MHz - 5720 MHz
+ * (restricted to 5500 MHz - 5700 MHz for India)
+ */
+static void ath_reg_apply_radar_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
@@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
- if (!ath_is_radar_freq(ch->center_freq))
+ if (!ath_is_radar_freq(ch->center_freq, reg))
continue;
/* We always enable radar detection/DFS on this
* frequency range. Additionally we also apply on
@@ -505,7 +510,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
/* We always apply this */
- ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_radar_flags(wiphy, reg);
/*
* This would happen when we have sent a custom regulatory request
@@ -669,7 +674,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
chan->flags |= IEEE80211_CHAN_DISABLED;
}
- ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_radar_flags(wiphy, reg);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
}
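
The regulatory change above makes the radar/DFS range country-dependent: 5500-5700 MHz when the country code is India, 5260-5720 MHz otherwise. A minimal standalone version of that check, using a stand-in CTRY_INDIA_DEMO constant rather than the driver's own definitions:

/* Country-dependent DFS range check, simplified and self-contained. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRY_INDIA_DEMO 356  /* stand-in for the driver's CTRY_INDIA */

static bool is_radar_freq(uint16_t center_freq, uint16_t country_code)
{
	if (country_code == CTRY_INDIA_DEMO)
		return center_freq >= 5500 && center_freq <= 5700;
	return center_freq >= 5260 && center_freq <= 5720;
}

int main(void)
{
	/* 5260 MHz is DFS in the default range but not under the India rule. */
	printf("5260 default: %d\n", is_radar_freq(5260, 0));
	printf("5260 India:   %d\n", is_radar_freq(5260, CTRY_INDIA_DEMO));
	printf("5600 India:   %d\n", is_radar_freq(5600, CTRY_INDIA_DEMO));
	return 0;
}
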
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 9bec8237231d..99c21aac68bd 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,7 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d224b3dd72ed..3196245ab820 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -461,25 +461,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
* @dev_addr: optional device address.
*
* P2P needs mac addresses for P2P device and interface. If no device
- * address it specified, these are derived from the primary net device, ie.
- * the permanent ethernet address of the device.
+ * address is specified, these are derived from a random ethernet
+ * address.
*/
static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
{
- struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
- bool local_admin = false;
+ bool random_addr = false;
- if (!dev_addr || is_zero_ether_addr(dev_addr)) {
- dev_addr = pri_ifp->mac_addr;
- local_admin = true;
- }
+ if (!dev_addr || is_zero_ether_addr(dev_addr))
+ random_addr = true;
- /* Generate the P2P Device Address. This consists of the device's
- * primary MAC address with the locally administered bit set.
+ /* Generate the P2P Device Address using a random ethernet
+ * address with the locally administered bit set.
*/
- memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
- if (local_admin)
- p2p->dev_addr[0] |= 0x02;
+ if (random_addr)
+ eth_random_addr(p2p->dev_addr);
+ else
+ memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
/* Generate the P2P Interface Address. If the discovery and connection
* BSSCFGs need to simultaneously co-exist, then this address must be
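
With this change the P2P device address starts from a random ethernet address instead of the primary interface MAC. The kernel's eth_random_addr() clears the multicast bit and sets the locally administered bit on the random bytes; a userspace sketch of the equivalent bit handling, with rand() standing in for the kernel RNG:

/* Locally administered random MAC, illustrative userspace version. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void random_local_mac(uint8_t mac[6])
{
	for (int i = 0; i < 6; i++)
		mac[i] = (uint8_t)(rand() & 0xff);
	mac[0] &= 0xfe;	/* clear multicast bit: must be a unicast address */
	mac[0] |= 0x02;	/* set locally administered bit */
}

int main(void)
{
	uint8_t mac[6];

	srand((unsigned int)time(NULL));
	random_local_mac(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
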
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index af92f00ca56e..03219cf1693a 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -569,6 +569,7 @@ out:
static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state)
{
+#ifndef CONFIG_MSM_GVM_QUIN
if (!info->prop)
return;
@@ -588,6 +589,9 @@ static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state)
pr_debug("%s: %s gpio is now %s\n", __func__,
info->name, info->state ? "enabled" : "disabled");
+#else
+ return;
+#endif
}
static int cnss_configure_wlan_en_gpio(bool state)
@@ -1560,7 +1564,6 @@ int cnss_msm_pcie_enumerate(u32 rc_idx)
return msm_pcie_enumerate(rc_idx);
}
#else /* !defined CONFIG_PCI_MSM */
-
struct pci_saved_state *cnss_pci_store_saved_state(struct pci_dev *dev)
{
return NULL;
@@ -1570,7 +1573,7 @@ int cnss_msm_pcie_pm_control(
enum msm_pcie_pm_opt pm_opt, u32 bus_num,
struct pci_dev *pdev, u32 options)
{
- return -ENODEV;
+ return 0;
}
int cnss_pci_load_and_free_saved_state(
@@ -1581,27 +1584,27 @@ int cnss_pci_load_and_free_saved_state(
int cnss_msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
{
- return -ENODEV;
+ return 0;
}
int cnss_msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
{
- return -ENODEV;
+ return 0;
}
int cnss_msm_pcie_recover_config(struct pci_dev *dev)
{
- return -ENODEV;
+ return 0;
}
int cnss_msm_pcie_register_event(struct msm_pcie_register_event *reg)
{
- return -ENODEV;
+ return 0;
}
int cnss_msm_pcie_enumerate(u32 rc_idx)
{
- return -EPROBE_DEFER;
+ return 0;
}
#endif
@@ -2870,7 +2873,9 @@ static int cnss_probe(struct platform_device *pdev)
struct esoc_desc *desc;
const char *client_desc;
struct device *dev = &pdev->dev;
+#ifndef CONFIG_MSM_GVM_QUIN
u32 rc_num;
+#endif
struct resource *res;
u32 ramdump_size = 0;
u32 smmu_iova_address[2];
@@ -2905,6 +2910,7 @@ static int cnss_probe(struct platform_device *pdev)
goto err_get_rc;
}
+#ifndef CONFIG_MSM_GVM_QUIN
ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num);
if (ret) {
pr_err("%s: Failed to find PCIe RC number!\n", __func__);
@@ -2916,6 +2922,7 @@ static int cnss_probe(struct platform_device *pdev)
pr_err("%s: Failed to enable PCIe RC%x!\n", __func__, rc_num);
goto err_pcie_enumerate;
}
+#endif
penv->pcie_link_state = PCIE_LINK_UP;
@@ -3096,7 +3103,9 @@ err_subsys_reg:
devm_unregister_esoc_client(&pdev->dev, penv->esoc_desc);
err_esoc_reg:
+#ifndef CONFIG_MSM_GVM_QUIN
err_pcie_enumerate:
+#endif
err_get_rc:
cnss_configure_wlan_en_gpio(WLAN_EN_LOW);
cnss_wlan_release_resources();
diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig
index 8bc9cc61b202..17c6860e719c 100644
--- a/drivers/net/wireless/cnss2/Kconfig
+++ b/drivers/net/wireless/cnss2/Kconfig
@@ -1,10 +1,12 @@
config CNSS2
tristate "CNSS2 Platform Driver for Wi-Fi Module"
- depends on !CNSS && PCI_MSM
+ depends on !CNSS && (PCI_MSM || PCI_HOST_GENERIC)
select CNSS_UTILS
---help---
This module adds the support for Connectivity Subsystem (CNSS) used
for PCIe based Wi-Fi devices with QCA6174/QCA6290 chipsets.
+ On bare-metal targets this module depends on PCI_MSM, while in the
+ GVM case it depends on PCI_HOST_GENERIC.
This driver also adds support to integrate WLAN module to subsystem
restart framework.
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index bcea74ad6685..fc35b0892768 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -571,7 +571,8 @@ static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
goto out;
}
- if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
ret = plat_priv->driver_ops->reinit(pci_priv->pci_dev,
pci_priv->pci_device_id);
if (ret) {
@@ -588,6 +589,7 @@ static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
ret);
goto out;
}
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
}
@@ -614,7 +616,8 @@ static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
return -EINVAL;
}
- if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
plat_priv->driver_ops->remove(pci_priv->pci_dev);
@@ -652,7 +655,9 @@ static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
complete(&plat_priv->power_up_complete);
}
- if (ret)
+ if (ret && test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+ goto out;
+ else if (ret)
goto shutdown;
return 0;
@@ -662,6 +667,10 @@ shutdown:
cnss_suspend_pci_link(plat_priv->bus_priv);
cnss_power_off_device(plat_priv);
+ clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+ clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+
+out:
return ret;
}
@@ -1179,8 +1188,10 @@ static void cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv)
if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) ||
test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
- test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state))
+ test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Ignore crash shutdown\n");
return;
+ }
ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_KERNEL_PANIC);
if (ret) {
@@ -1538,11 +1549,6 @@ static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
set_bit(CNSS_FW_BOOT_RECOVERY,
&plat_priv->driver_state);
- } else if (test_bit(CNSS_DRIVER_LOADING,
- &plat_priv->driver_state)) {
- cnss_pr_err("Driver probe is in progress, ignore recovery\n");
- ret = -EINVAL;
- goto out;
}
break;
}
@@ -1684,6 +1690,7 @@ static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
{
+ plat_priv->cal_done = true;
cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
cnss_shutdown(plat_priv);
clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index 81b5de8bc66f..a36281cb560f 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -107,6 +107,7 @@ struct cnss_fw_mem {
void *va;
phys_addr_t pa;
bool valid;
+ u32 type;
};
enum cnss_driver_event_type {
@@ -192,7 +193,8 @@ struct cnss_plat_data {
struct wlfw_rf_board_info_s_v01 board_info;
struct wlfw_soc_info_s_v01 soc_info;
struct wlfw_fw_version_info_s_v01 fw_version_info;
- struct cnss_fw_mem fw_mem;
+ u32 fw_mem_seg_len;
+ struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
struct cnss_fw_mem m3_mem;
struct cnss_pin_connect_result pin_result;
struct dentry *root_dentry;
@@ -204,6 +206,7 @@ struct cnss_plat_data {
u32 diag_reg_read_mem_type;
u32 diag_reg_read_len;
u8 *diag_reg_read_buf;
+ bool cal_done;
};
void *cnss_bus_dev_to_bus_priv(struct device *dev);
@@ -213,8 +216,21 @@ int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
u32 flags, void *data);
int cnss_get_vreg(struct cnss_plat_data *plat_priv);
int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
+
+#ifndef CONFIG_MSM_GVM_QUIN
int cnss_power_on_device(struct cnss_plat_data *plat_priv);
void cnss_power_off_device(struct cnss_plat_data *plat_priv);
+#else /* CONFIG_MSM_GVM_QUIN */
+static inline int cnss_power_on_device(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
+static inline void cnss_power_off_device(struct cnss_plat_data *plat_priv)
+{
+}
+#endif /* CONFIG_MSM_GVM_QUIN */
+
int cnss_register_subsys(struct cnss_plat_data *plat_priv);
void cnss_unregister_subsys(struct cnss_plat_data *plat_priv);
int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index d57d55ec79dd..2356caa3af78 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -44,7 +44,9 @@
#define MAX_M3_FILE_NAME_LENGTH 13
#define DEFAULT_M3_FILE_NAME "m3.bin"
+#ifdef CONFIG_PCI_MSM
static DEFINE_SPINLOCK(pci_link_down_lock);
+#endif
static unsigned int pci_link_down_panic;
module_param(pci_link_down_panic, uint, 0600);
@@ -91,6 +93,7 @@ static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
return 0;
}
+#ifdef CONFIG_PCI_MSM
static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
{
int ret = 0;
@@ -130,7 +133,8 @@ int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
pci_disable_device(pci_priv->pci_dev);
if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
- if (pci_set_power_state(pci_priv->pci_dev, PCI_D3hot))
+ ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
+ if (ret)
cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
}
@@ -217,6 +221,13 @@ int cnss_pci_link_down(struct device *dev)
}
EXPORT_SYMBOL(cnss_pci_link_down);
+#else /* CONFIG_PCI_MSM */
+static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_MSM */
+
static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
{
int ret = 0;
@@ -277,6 +288,7 @@ static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
pci_priv->smmu_mapping = NULL;
}
+#ifdef CONFIG_PCI_MSM
static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
{
unsigned long flags;
@@ -393,10 +405,12 @@ static int cnss_pci_suspend(struct device *dev)
SAVE_PCI_CONFIG_SPACE);
pci_disable_device(pci_dev);
- ret = pci_set_power_state(pci_dev, PCI_D3hot);
- if (ret)
- cnss_pr_err("Failed to set D3Hot, err = %d\n",
- ret);
+ if (pci_dev->device != QCA6174_DEVICE_ID) {
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n",
+ ret);
+ }
}
cnss_pci_set_monitor_wake_intr(pci_priv, false);
@@ -425,16 +439,19 @@ static int cnss_pci_resume(struct device *dev)
if (pci_priv->pci_link_down_ind)
goto out;
- ret = pci_enable_device(pci_dev);
- if (ret)
- cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+ if (pci_priv->pci_link_state) {
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to enable PCI device, err = %d\n",
+ ret);
- if (pci_priv->saved_state)
- cnss_set_pci_config_space(pci_priv,
- RESTORE_PCI_CONFIG_SPACE);
+ if (pci_priv->saved_state)
+ cnss_set_pci_config_space(pci_priv,
+ RESTORE_PCI_CONFIG_SPACE);
- pci_set_master(pci_dev);
- cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ }
driver_ops = plat_priv->driver_ops;
if (driver_ops && driver_ops->resume) {
@@ -592,6 +609,17 @@ int cnss_wlan_pm_control(struct device *dev, bool vote)
}
EXPORT_SYMBOL(cnss_wlan_pm_control);
+#else /* CONFIG_PCI_MSM */
+static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+
+static void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
+{
+}
+#endif /* CONFIG_PCI_MSM */
+
int cnss_auto_suspend(struct device *dev)
{
int ret = 0;
@@ -618,9 +646,12 @@ int cnss_auto_suspend(struct device *dev)
cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
pci_disable_device(pci_dev);
- ret = pci_set_power_state(pci_dev, PCI_D3hot);
- if (ret)
- cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+ if (pci_dev->device != QCA6174_DEVICE_ID) {
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n",
+ ret);
+ }
cnss_pr_dbg("Suspending PCI link\n");
if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
@@ -712,18 +743,21 @@ int cnss_pm_request_resume(struct cnss_pci_data *pci_priv)
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
- struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
-
- if (!fw_mem->va && fw_mem->size) {
- fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
- fw_mem->size, &fw_mem->pa,
- GFP_KERNEL);
- if (!fw_mem->va) {
- cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n",
- fw_mem->size);
- fw_mem->size = 0;
-
- return -ENOMEM;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ int i;
+
+ for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ if (!fw_mem[i].va && fw_mem[i].size) {
+ fw_mem[i].va =
+ dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ fw_mem[i].size,
+ &fw_mem[i].pa, GFP_KERNEL);
+ if (!fw_mem[i].va) {
+ cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
+ fw_mem[i].size, fw_mem[i].type);
+
+ return -ENOMEM;
+ }
}
}
@@ -733,17 +767,25 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
- struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
-
- if (fw_mem->va && fw_mem->size) {
- cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
- fw_mem->va, &fw_mem->pa, fw_mem->size);
- dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size,
- fw_mem->va, fw_mem->pa);
- fw_mem->va = NULL;
- fw_mem->pa = 0;
- fw_mem->size = 0;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ int i;
+
+ for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ if (fw_mem[i].va && fw_mem[i].size) {
+ cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+ fw_mem[i].va, &fw_mem[i].pa,
+ fw_mem[i].size, fw_mem[i].type);
+ dma_free_coherent(&pci_priv->pci_dev->dev,
+ fw_mem[i].size, fw_mem[i].va,
+ fw_mem[i].pa);
+ fw_mem[i].va = NULL;
+ fw_mem[i].pa = 0;
+ fw_mem[i].size = 0;
+ fw_mem[i].type = 0;
+ }
}
+
+ plat_priv->fw_mem_seg_len = 0;
}
int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
@@ -949,6 +991,31 @@ void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
}
EXPORT_SYMBOL(cnss_get_msi_address);
+#ifdef CONFIG_PCI_MSM
+static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev)
+{
+ int ret;
+
+ ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+ if (ret) {
+ cnss_pr_err("PCI DMA mask: %d, err: %d\n", PCI_DMA_MASK, ret);
+ return ret;
+ }
+
+ ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+ if (ret)
+ cnss_pr_err("PCI consistent DMA mask: %d, err: %d\n",
+ PCI_DMA_MASK, ret);
+
+ return ret;
+}
+#else /* CONFIG_PCI_MSM */
+static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_MSM */
+
static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
{
int ret = 0;
@@ -981,19 +1048,9 @@ static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
goto disable_device;
}
- ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
- if (ret) {
- cnss_pr_err("Failed to set PCI DMA mask (%d), err = %d\n",
- ret, PCI_DMA_MASK);
- goto release_region;
- }
-
- ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
- if (ret) {
- cnss_pr_err("Failed to set PCI consistent DMA mask (%d), err = %d\n",
- ret, PCI_DMA_MASK);
+ ret = cnss_pci_set_dma_mask(pci_dev);
+ if (ret)
goto release_region;
- }
pci_set_master(pci_dev);
@@ -1137,14 +1194,22 @@ void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
static void cnss_mhi_notify_status(enum MHI_CB_REASON reason, void *priv)
{
struct cnss_pci_data *pci_priv = priv;
- struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_plat_data *plat_priv;
enum cnss_recovery_reason cnss_reason = CNSS_REASON_RDDM;
- if (!pci_priv)
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL");
return;
+ }
+
+ plat_priv = pci_priv->plat_priv;
cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
+ if (plat_priv->driver_ops && plat_priv->driver_ops->update_status)
+ plat_priv->driver_ops->update_status(pci_priv->pci_dev,
+ CNSS_FW_DOWN);
+
set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
del_timer(&plat_priv->fw_boot_timer);
@@ -1562,6 +1627,7 @@ static const struct pci_device_id cnss_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
+#ifdef CONFIG_PCI_MSM
static const struct dev_pm_ops cnss_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
@@ -1569,20 +1635,24 @@ static const struct dev_pm_ops cnss_pm_ops = {
SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
cnss_pci_runtime_idle)
};
+#endif
struct pci_driver cnss_pci_driver = {
.name = "cnss_pci",
.id_table = cnss_pci_id_table,
.probe = cnss_pci_probe,
.remove = cnss_pci_remove,
+#ifdef CONFIG_PCI_MSM
.driver = {
.pm = &cnss_pm_ops,
},
+#endif
};
-int cnss_pci_init(struct cnss_plat_data *plat_priv)
+#ifdef CONFIG_PCI_MSM
+static inline int cnss_msm_pcie_enumerate(struct cnss_plat_data *plat_priv)
{
- int ret = 0;
+ int ret;
struct device *dev = &plat_priv->plat_dev->dev;
u32 rc_num;
@@ -1599,6 +1669,25 @@ int cnss_pci_init(struct cnss_plat_data *plat_priv)
goto out;
}
+ return 0;
+out:
+ return ret;
+}
+#else /* CONFIG_PCI_MSM */
+static inline int cnss_msm_pcie_enumerate(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_MSM */
+
+int cnss_pci_init(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+
+ ret = cnss_msm_pcie_enumerate(plat_priv);
+ if (ret)
+ goto out;
+
ret = pci_register_driver(&cnss_pci_driver);
if (ret) {
cnss_pr_err("Failed to register to PCI framework, err = %d\n",
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 89edc6020d35..a00ca61972f0 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -124,8 +124,21 @@ static inline int cnss_pci_get_auto_suspended(void *bus_priv)
return atomic_read(&pci_priv->auto_suspended);
}
+#ifdef CONFIG_PCI_MSM
int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv);
int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
+#else /* CONFIG_PCI_MSM */
+static inline int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+
+static inline int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_MSM */
+
int cnss_pci_init(struct cnss_plat_data *plat_priv);
void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c
index 8ed1507bde11..8a58a5357765 100644
--- a/drivers/net/wireless/cnss2/power.c
+++ b/drivers/net/wireless/cnss2/power.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -110,6 +110,7 @@ out:
return ret;
}
+#ifndef CONFIG_MSM_GVM_QUIN
static int cnss_vreg_on(struct cnss_plat_data *plat_priv)
{
int ret = 0;
@@ -229,6 +230,7 @@ static int cnss_vreg_off(struct cnss_plat_data *plat_priv)
return ret;
}
+#endif /* CONFIG_MSM_GVM_QUIN */
int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
{
@@ -285,6 +287,7 @@ out:
return ret;
}
+#ifndef CONFIG_MSM_GVM_QUIN
static int cnss_select_pinctrl_state(struct cnss_plat_data *plat_priv,
bool state)
{
@@ -368,6 +371,7 @@ void cnss_power_off_device(struct cnss_plat_data *plat_priv)
cnss_select_pinctrl_state(plat_priv, false);
cnss_vreg_off(plat_priv);
}
+#endif /* CONFIG_MSM_GVM_QUIN */
void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv)
{
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index f4344aee54ee..b8777c18d252 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -159,10 +159,9 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- req.daemon_support_valid = 1;
- req.daemon_support = daemon_support;
-
- cnss_pr_dbg("daemon_support is %d\n", req.daemon_support);
+ req.num_clients_valid = 1;
+ req.num_clients = daemon_support ? 2 : 1;
+ cnss_pr_dbg("Number of clients is %d\n", req.num_clients);
req.wake_msi = cnss_get_wake_msi(plat_priv);
if (req.wake_msi) {
@@ -170,6 +169,19 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
req.wake_msi_valid = 1;
}
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+
+ req.cal_done_valid = 1;
+ req.cal_done = plat_priv->cal_done;
+ cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done);
+
req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN;
req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01;
req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei;
@@ -221,8 +233,8 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
req.request_mem_enable = 1;
req.fw_mem_ready_enable_valid = 1;
req.fw_mem_ready_enable = 1;
- req.cold_boot_cal_done_enable_valid = 1;
- req.cold_boot_cal_done_enable = 1;
+ req.fw_init_done_enable_valid = 1;
+ req.fw_init_done_enable = 1;
req.pin_connect_result_enable_valid = 1;
req.pin_connect_result_enable = 1;
@@ -260,27 +272,48 @@ static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv,
void *msg, unsigned int msg_len)
{
struct msg_desc ind_desc;
- struct wlfw_request_mem_ind_msg_v01 ind_msg;
- struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
- int ret = 0;
+ struct wlfw_request_mem_ind_msg_v01 *ind_msg;
+ int ret = 0, i;
+
+ ind_msg = kzalloc(sizeof(*ind_msg), GFP_KERNEL);
+ if (!ind_msg)
+ return -ENOMEM;
ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01;
ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN;
ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei;
- ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ ret = qmi_kernel_decode(&ind_desc, ind_msg, msg, msg_len);
if (ret < 0) {
cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n",
ret, msg_len);
- return ret;
+ goto out;
}
- fw_mem->size = ind_msg.size;
+ if (ind_msg->mem_seg_len == 0 ||
+ ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
+ cnss_pr_err("Invalid memory segment length: %u\n",
+ ind_msg->mem_seg_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cnss_pr_dbg("FW memory segment count is %u\n", ind_msg->mem_seg_len);
+ plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len;
+ for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
+ plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
+ plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
+ }
cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
0, NULL);
+ kfree(ind_msg);
return 0;
+
+out:
+ kfree(ind_msg);
+ return ret;
}
static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv,
@@ -317,29 +350,46 @@ static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv,
int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
{
- struct wlfw_respond_mem_req_msg_v01 req;
- struct wlfw_respond_mem_resp_msg_v01 resp;
+ struct wlfw_respond_mem_req_msg_v01 *req;
+ struct wlfw_respond_mem_resp_msg_v01 *resp;
struct msg_desc req_desc, resp_desc;
- struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
- int ret = 0;
+ struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
+ int ret = 0, i;
cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
plat_priv->driver_state);
- if (!fw_mem->pa || !fw_mem->size) {
- cnss_pr_err("Memory for FW is not available!\n");
- ret = -ENOMEM;
- goto out;
- }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+	kfree(req);
+	return -ENOMEM;
+ }
- cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
- fw_mem->va, &fw_mem->pa, fw_mem->size);
+ req->mem_seg_len = plat_priv->fw_mem_seg_len;
+ for (i = 0; i < req->mem_seg_len; i++) {
+ if (!fw_mem[i].pa || !fw_mem[i].size) {
+ if (fw_mem[i].type == 0) {
+ cnss_pr_err("Invalid memory for FW type, segment = %d\n",
+ i);
+ ret = -EINVAL;
+ goto out;
+ }
+ cnss_pr_err("Memory for FW is not available for type: %u\n",
+ fw_mem[i].type);
+ ret = -ENOMEM;
+ goto out;
+ }
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
+ cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+ fw_mem[i].va, &fw_mem[i].pa,
+ fw_mem[i].size, fw_mem[i].type);
- req.addr = fw_mem->pa;
- req.size = fw_mem->size;
+ req->mem_seg[i].addr = fw_mem[i].pa;
+ req->mem_seg[i].size = fw_mem[i].size;
+ req->mem_seg[i].type = fw_mem[i].type;
+ }
req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN;
req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01;
@@ -349,8 +399,8 @@ int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01;
resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei;
- ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
- sizeof(req), &resp_desc, &resp, sizeof(resp),
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req,
+ sizeof(*req), &resp_desc, resp, sizeof(*resp),
QMI_WLFW_TIMEOUT_MS);
if (ret < 0) {
cnss_pr_err("Failed to send respond memory request, err = %d\n",
@@ -358,16 +408,21 @@ int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
goto out;
}
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
- resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
+ resp->resp.result, resp->resp.error);
+ ret = resp->resp.result;
goto out;
}
+ kfree(req);
+ kfree(resp);
return 0;
+
out:
CNSS_ASSERT(0);
+ kfree(req);
+ kfree(resp);
return ret;
}
@@ -908,12 +963,12 @@ static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
CNSS_DRIVER_EVENT_FW_MEM_READY,
0, NULL);
break;
- case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01:
+ case QMI_WLFW_FW_READY_IND_V01:
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
0, NULL);
break;
- case QMI_WLFW_FW_READY_IND_V01:
+ case QMI_WLFW_FW_INIT_DONE_IND_V01:
cnss_driver_event_post(plat_priv,
CNSS_DRIVER_EVENT_FW_READY,
0, NULL);
@@ -974,11 +1029,11 @@ int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv)
cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
plat_priv->driver_state);
- ret = cnss_wlfw_host_cap_send_sync(plat_priv);
+ ret = cnss_wlfw_ind_register_send_sync(plat_priv);
if (ret < 0)
goto out;
- ret = cnss_wlfw_ind_register_send_sync(plat_priv);
+ ret = cnss_wlfw_host_cap_send_sync(plat_priv);
if (ret < 0)
goto out;
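
The respond-memory path now heap-allocates the QMI request and response structs instead of keeping them on the stack, and releases both on every exit path. A standalone sketch of that allocate-then-single-cleanup pattern, using placeholder demo_* types and a dummy transport call rather than the real QMI API:

/* Heap-allocated request/response with one cleanup path (no leaks). */
#include <stdio.h>
#include <stdlib.h>

struct demo_req  { char payload[4096]; };	/* too big for a kernel stack frame */
struct demo_resp { int result; };

static int demo_send(const struct demo_req *req, struct demo_resp *resp)
{
	(void)req;
	resp->result = 0;	/* pretend the transport succeeded */
	return 0;
}

static int demo_transact(void)
{
	struct demo_req *req = NULL;
	struct demo_resp *resp = NULL;
	int ret;

	req = calloc(1, sizeof(*req));
	resp = calloc(1, sizeof(*resp));
	if (!req || !resp) {
		ret = -1;
		goto out;	/* free(NULL) is safe, so one exit path suffices */
	}

	ret = demo_send(req, resp);
out:
	free(req);
	free(resp);
	return ret;
}

int main(void)
{
	printf("transact: %d\n", demo_transact());
	return 0;
}
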
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index 7d6a771bc0d5..bbf707b869bd 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,7 +62,7 @@ static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -97,7 +97,7 @@ static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -123,7 +123,7 @@ static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -140,7 +140,7 @@ static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -175,7 +175,131 @@ static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct wlfw_mem_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg),
+ .ei_array = wlfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -201,7 +325,7 @@ static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -218,7 +342,7 @@ static struct elem_info wlfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -235,7 +359,7 @@ static struct elem_info wlfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -261,7 +385,7 @@ static struct elem_info wlfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -418,7 +542,7 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
.is_array = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
- cold_boot_cal_done_enable_valid),
+ fw_init_done_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
@@ -427,7 +551,7 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
.is_array = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
- cold_boot_cal_done_enable),
+ fw_init_done_enable),
},
{
.data_type = QMI_OPT_FLAG,
@@ -448,9 +572,45 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
rejuvenate_enable),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -489,7 +649,7 @@ struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -497,7 +657,7 @@ struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -505,7 +665,7 @@ struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -573,7 +733,7 @@ struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -608,7 +768,7 @@ struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -626,7 +786,7 @@ struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -764,7 +924,7 @@ struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -782,7 +942,7 @@ struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -790,7 +950,7 @@ struct elem_info wlfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -920,7 +1080,7 @@ struct elem_info wlfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1054,7 +1214,7 @@ struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1073,7 +1233,7 @@ struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1117,7 +1277,7 @@ struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1135,7 +1295,7 @@ struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1153,7 +1313,7 @@ struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1269,7 +1429,7 @@ struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1288,7 +1448,7 @@ struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1316,7 +1476,7 @@ struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1342,7 +1502,7 @@ struct elem_info wlfw_cal_update_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1459,7 +1619,7 @@ struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1485,7 +1645,7 @@ struct elem_info wlfw_msa_info_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1522,7 +1682,7 @@ struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1530,7 +1690,7 @@ struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1548,7 +1708,7 @@ struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1574,7 +1734,7 @@ struct elem_info wlfw_ini_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1592,7 +1752,7 @@ struct elem_info wlfw_ini_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1627,7 +1787,7 @@ struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1676,7 +1836,7 @@ struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1724,7 +1884,7 @@ struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1743,7 +1903,7 @@ struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1760,7 +1920,7 @@ struct elem_info wlfw_vbatt_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1778,7 +1938,7 @@ struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1804,7 +1964,7 @@ struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1822,7 +1982,7 @@ struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1834,16 +1994,16 @@ struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
.is_array = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
- daemon_support_valid),
+ num_clients_valid),
},
{
- .data_type = QMI_UNSIGNED_1_BYTE,
+ .data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
- .elem_size = sizeof(u8),
+ .elem_size = sizeof(u32),
.is_array = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
- daemon_support),
+ num_clients),
},
{
.data_type = QMI_OPT_FLAG,
@@ -1864,9 +2024,216 @@ struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
wake_msi),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1884,50 +2251,61 @@ struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
{
- .data_type = QMI_UNSIGNED_4_BYTE,
+ .data_type = QMI_DATA_LEN,
.elem_len = 1,
- .elem_size = sizeof(u32),
+ .elem_size = sizeof(u8),
.is_array = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
- size),
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
{
- .data_type = QMI_UNSIGNED_8_BYTE,
+ .data_type = QMI_DATA_LEN,
.elem_len = 1,
- .elem_size = sizeof(u64),
+ .elem_size = sizeof(u8),
.is_array = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
- addr),
+ mem_seg_len),
},
{
- .data_type = QMI_UNSIGNED_4_BYTE,
- .elem_len = 1,
- .elem_size = sizeof(u32),
- .is_array = NO_ARRAY,
- .tlv_type = 0x02,
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
.offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
- size),
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
},
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1945,7 +2323,7 @@ struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -1953,15 +2331,15 @@ struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
-struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2041,7 +2419,7 @@ struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2049,7 +2427,7 @@ struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2068,7 +2446,7 @@ struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2096,7 +2474,7 @@ struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2155,7 +2533,7 @@ struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2181,7 +2559,7 @@ struct elem_info wlfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2199,7 +2577,7 @@ struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
@@ -2216,6 +2594,14 @@ struct elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
- .is_array = QMI_COMMON_TLV_TYPE,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
},
};
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index 9b56eb0c02fb..00a873d11d14 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,10 +23,12 @@
#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_CAL_DONE_IND_V01 0x003E
#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
@@ -42,7 +44,6 @@
#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
-#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
@@ -72,13 +73,16 @@
#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 32
#define QMI_WLFW_MAX_NUM_CAL_V01 5
#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
#define QMI_WLFW_MAX_NUM_CE_V01 12
#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
#define QMI_WLFW_MAX_STR_LEN_V01 16
#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
@@ -117,6 +121,17 @@ enum wlfw_pipedir_enum_v01 {
WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
};
+enum wlfw_mem_type_enum_v01 {
+ WLFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLFW_MEM_BDF_V01 = 2,
+ QMI_WLFW_MEM_M3_V01 = 3,
+ QMI_WLFW_MEM_CAL_V01 = 4,
+ QMI_WLFW_MEM_DPD_V01 = 5,
+ WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
@@ -128,6 +143,7 @@ enum wlfw_pipedir_enum_v01 {
#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
#define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL)
+#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
@@ -160,6 +176,26 @@ struct wlfw_memory_region_info_s_v01 {
u8 secure_flag;
};
+struct wlfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_seg_s_v01 {
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct wlfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
struct wlfw_rf_chip_info_s_v01 {
u32 chip_id;
u32 chip_family;
@@ -195,13 +231,17 @@ struct wlfw_ind_register_req_msg_v01 {
u8 request_mem_enable;
u8 fw_mem_ready_enable_valid;
u8 fw_mem_ready_enable;
- u8 cold_boot_cal_done_enable_valid;
- u8 cold_boot_cal_done_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
u8 rejuvenate_enable_valid;
u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
};
-#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 54
extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
struct wlfw_ind_register_resp_msg_v01 {
@@ -533,13 +573,36 @@ struct wlfw_mac_addr_resp_msg_v01 {
extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
struct wlfw_host_cap_req_msg_v01 {
- u8 daemon_support_valid;
- u8 daemon_support;
+ u8 num_clients_valid;
+ u32 num_clients;
u8 wake_msi_valid;
u32 wake_msi;
-};
-
-#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 11
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
@@ -550,18 +613,19 @@ struct wlfw_host_cap_resp_msg_v01 {
extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
struct wlfw_request_mem_ind_msg_v01 {
- u32 size;
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
};
-#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 1124
extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
struct wlfw_respond_mem_req_msg_v01 {
- u64 addr;
- u32 size;
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
};
-#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 548
extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
struct wlfw_respond_mem_resp_msg_v01 {
@@ -578,12 +642,12 @@ struct wlfw_fw_mem_ready_ind_msg_v01 {
#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
-struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+struct wlfw_fw_init_done_ind_msg_v01 {
char placeholder;
};
-#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
-extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
struct wlfw_rejuvenate_ind_msg_v01 {
u8 cause_for_rejuvenation_valid;
@@ -654,4 +718,11 @@ struct wlfw_xo_cal_ind_msg_v01 {
#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
extern struct elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+struct wlfw_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cal_done_ind_msg_v01_ei[];
+
#endif
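The header above encodes every optional QMI member as a `*_valid` flag paired with the value (wake_msi_valid/wake_msi) and every variable-length array as a `*_len` count paired with fixed-capacity storage (mem_seg_len/mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]). A minimal standalone C sketch of that convention follows; the struct and field names are hypothetical and this is not the real wire encoding.

#include <stdio.h>
#include <stdint.h>

#define MAX_SEGS 4	/* hypothetical cap, standing in for QMI_WLFW_MAX_NUM_MEM_SEG_V01 */

/* Hypothetical request: one optional scalar (valid flag + value) and one
 * variable-length array (length + fixed-capacity storage), mirroring the
 * layout convention of the structures in the header above. */
struct demo_req {
	uint8_t  wake_msi_valid;	/* nonzero if wake_msi carries a value */
	uint32_t wake_msi;
	uint32_t seg_len;		/* number of valid entries in seg[] */
	uint32_t seg[MAX_SEGS];
};

int main(void)
{
	struct demo_req req = { 0 };

	req.wake_msi_valid = 1;
	req.wake_msi = 42;
	req.seg_len = 2;
	req.seg[0] = 0x1000;
	req.seg[1] = 0x2000;

	if (req.wake_msi_valid)
		printf("wake_msi = %u\n", (unsigned)req.wake_msi);
	for (uint32_t i = 0; i < req.seg_len && i < MAX_SEGS; i++)
		printf("seg[%u] = 0x%x\n", (unsigned)i, (unsigned)req.seg[i]);
	return 0;
}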
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index a740083634d8..63f95e9c2992 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -446,8 +446,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
return 0;
}
-#ifdef CONFIG_PM
-static int cw1200_spi_suspend(struct device *dev)
+static int __maybe_unused cw1200_spi_suspend(struct device *dev)
{
struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
@@ -460,16 +459,12 @@ static int cw1200_spi_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
-#endif
-
static struct spi_driver spi_driver = {
.probe = cw1200_spi_probe,
.remove = cw1200_spi_disconnect,
.driver = {
.name = "cw1200_wlan_spi",
-#ifdef CONFIG_PM
- .pm = &cw1200_pm_ops,
-#endif
+ .pm = IS_ENABLED(CONFIG_PM) ? &cw1200_pm_ops : NULL,
},
};
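The cw1200_spi change above drops the #ifdef CONFIG_PM guards: the suspend callback is marked __maybe_unused and the pm pointer is chosen at build time with IS_ENABLED(). A standalone sketch of the same pattern, with MY_IS_ENABLED() and the tiny structs below standing in for the kernel macros and types.

#include <stdio.h>

/* Stand-ins for the kernel pieces: MY_IS_ENABLED() mimics IS_ENABLED()
 * by expanding to 0 or 1 at compile time, and my_pm_ops mimics a tiny
 * dev_pm_ops. None of this is the real kernel API. */
#define MY_CONFIG_PM 1
#define MY_IS_ENABLED(cfg) (cfg)

struct my_pm_ops {
	int (*suspend)(void *dev);
};

/* Kept compiled in regardless of configuration; the attribute silences
 * "defined but not used" warnings when PM support is configured out,
 * which is what __maybe_unused does in the driver. */
static int __attribute__((unused)) my_suspend(void *dev)
{
	(void)dev;
	return 0;
}

static const struct my_pm_ops my_pm = { .suspend = my_suspend };

struct my_driver {
	const char *name;
	const struct my_pm_ops *pm;
};

int main(void)
{
	struct my_driver drv = {
		.name = "cw1200_wlan_spi",
		/* Only the pointer is dropped when PM is disabled. */
		.pm = MY_IS_ENABLED(MY_CONFIG_PM) ? &my_pm : NULL,
	};

	printf("%s: pm ops %s\n", drv.name, drv.pm ? "present" : "absent");
	return 0;
}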
diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h
index 3ed90ff22bb8..534548470ebc 100644
--- a/drivers/net/wireless/cw1200/pm.h
+++ b/drivers/net/wireless/cw1200/pm.h
@@ -31,13 +31,18 @@ int cw1200_pm_init(struct cw1200_pm_state *pm,
void cw1200_pm_deinit(struct cw1200_pm_state *pm);
int cw1200_wow_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
-int cw1200_wow_resume(struct ieee80211_hw *hw);
int cw1200_can_suspend(struct cw1200_common *priv);
+int cw1200_wow_resume(struct ieee80211_hw *hw);
void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
unsigned long tmo);
#else
static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
- unsigned long tmo) {
+ unsigned long tmo)
+{
+}
+static inline int cw1200_can_suspend(struct cw1200_common *priv)
+{
+ return 0;
}
#endif
#endif
diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c
index 9e0ca3048657..3dd46c78c1cc 100644
--- a/drivers/net/wireless/cw1200/wsm.c
+++ b/drivers/net/wireless/cw1200/wsm.c
@@ -379,7 +379,6 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
{
int ret;
int count;
- int i;
count = WSM_GET32(buf);
if (WARN_ON(count <= 0))
@@ -395,11 +394,10 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
}
cw1200_debug_txed_multi(priv, count);
- for (i = 0; i < count; ++i) {
+ do {
ret = wsm_tx_confirm(priv, buf, link_id);
- if (ret)
- return ret;
- }
+ } while (!ret && --count);
+
return ret;
underflow:
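The wsm_multi_tx_confirm() hunk above replaces the indexed for loop with a do/while that stops on the first non-zero return while still honouring the confirmed count. A small standalone illustration of that loop shape, with confirm() as a hypothetical stand-in for wsm_tx_confirm().

#include <stdio.h>

/* Hypothetical stand-in for wsm_tx_confirm(): pretend the fourth
 * confirmation fails so the early-exit behaviour is visible. */
static int confirm(int i)
{
	return (i == 3) ? -1 : 0;
}

/* Same loop shape as the hunk above: run at most 'count' confirmations,
 * but stop as soon as one of them returns an error. */
static int confirm_all(int count)
{
	int ret;
	int i = 0;

	if (count <= 0)
		return -1;

	do {
		ret = confirm(i++);
	} while (!ret && --count);

	return ret;
}

int main(void)
{
	printf("5 items -> %d\n", confirm_all(5));	/* stops at the failure */
	printf("3 items -> %d\n", confirm_all(3));	/* all succeed */
	return 0;
}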
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index f877fbc7d7af..8a9164da6c50 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -699,16 +699,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
val != PS_MANUAL_POLL)
return -EINVAL;
- old_ps = data->ps;
- data->ps = val;
-
- local_bh_disable();
if (val == PS_MANUAL_POLL) {
+ if (data->ps != PS_ENABLED)
+ return -EINVAL;
+ local_bh_disable();
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_ps_poll, data);
- data->ps_poll_pending = true;
- } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+ local_bh_enable();
+ return 0;
+ }
+ old_ps = data->ps;
+ data->ps = val;
+
+ local_bh_disable();
+ if (old_ps == PS_DISABLED && val != PS_DISABLED) {
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_nullfunc_ps, data);
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index fbb1986eda3c..686b1b5dd394 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- skb_reserve(skb, MT_DMA_HDR_LEN);
- memcpy(skb_put(skb, len), data, len);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+ }
return skb;
}
@@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
}
@@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0881ba8535f4..c78abfc7bd96 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = {
0x04, 0x08, /* Noise gain, limit offset */
0x28, 0x28, /* det rssi, med busy offsets */
7, /* det sync thresh */
- 0, 2, 2 /* test mode, min, max */
+ 0, 2, 2, /* test mode, min, max */
+ 0, /* rx/tx delay */
+ 0, 0, 0, 0, 0, 0, /* current BSS id */
+ 0 /* hop set */
};
/*===========================================================================*/
@@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local)
* a_beacon_period = hops a_beacon_period = KuS
*//* 64ms = 010000 */
if (local->fw_ver == 0x55) {
- memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
+ memcpy(&local->sparm.b4, b4_default_startup_parms,
sizeof(struct b4_startup_params));
/* Translate sane kus input values to old build 4/5 format */
/* i = hop time in uS truncated to 3 bytes */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index b7f72f9c7988..b3691712df61 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
goto err_free_dev;
}
mutex_init(&priv->io_mutex);
+ mutex_init(&priv->conf_mutex);
SET_IEEE80211_DEV(dev, &intf->dev);
usb_set_intfdata(intf, dev);
@@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
printk(KERN_ERR "rtl8187: Cannot register device\n");
goto err_free_dmabuf;
}
- mutex_init(&priv->conf_mutex);
skb_queue_head_init(&priv->b_tx_status.queue);
wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index c48b7e8ee0d6..b51815eccdb3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1572,7 +1572,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
dev_kfree_skb_irq(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
+
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpci->tx_ring[i].cur_tx_rp = 0;
+ rtlpci->tx_ring[i].cur_tx_wp = 0;
+ }
+
ring->idx = 0;
+ ring->entries = rtlpci->txringcount[i];
}
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 5a3df9198ddf..89515f02c353 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1123,7 +1123,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
/* Configuration Space offset 0x70f BIT7 is used to control L0S */
tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
- _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7));
+ _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
+ ASPM_L1_LATENCY << 3);
/* Configuration Space offset 0x719 Bit3 is for L1
* BIT4 is for clock request
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index b57cfd965196..7b13962ec9da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 738d541a2255..348ed1b0e58b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
}
if (0 == tmp) {
read_addr = REG_DBI_RDATA + addr % 4;
- ret = rtl_read_word(rtlpriv, read_addr);
+ ret = rtl_read_byte(rtlpriv, read_addr);
}
return ret;
}
@@ -1169,7 +1169,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
}
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
- _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
+ _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
+ ASPM_L1_LATENCY << 3);
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
_rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index b6faf624480e..d676d055feda 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -99,6 +99,7 @@
#define RTL_USB_MAX_RX_COUNT 100
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
+#define ASPM_L1_LATENCY 7
#define TOTAL_CAM_ENTRY 32
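ASPM_L1_LATENCY is defined as 7 and shifted left by 3 in the rtl8723be/rtl8821ae hunks above, so the write to config-space offset 0x70f keeps the existing bits, forces bit 7 and programs an L1 latency field into bits 5:3. A tiny standalone check of that bit arithmetic; the register value used here is made up.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1U << (n))
#define ASPM_L1_LATENCY	7

int main(void)
{
	/* Hypothetical value read back from config-space offset 0x70f. */
	uint8_t tmp = 0x40;
	/* Same composition as the hunks above: keep the existing bits,
	 * force bit 7 (L0S control) and program latency into bits 5:3. */
	uint8_t val = tmp | BIT(7) | (ASPM_L1_LATENCY << 3);

	printf("0x%02x -> 0x%02x\n", (unsigned)tmp, (unsigned)val);	/* 0x40 -> 0xf8 */
	return 0;
}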
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a13d1f2b5912..259590013382 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3425,6 +3425,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
/* because rndis_command() sleeps we need to use workqueue */
priv->workqueue = create_singlethread_workqueue("rndis_wlan");
+ if (!priv->workqueue) {
+ wiphy_free(wiphy);
+ return -ENOMEM;
+ }
INIT_WORK(&priv->work, rndis_wlan_worker);
INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller);
INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
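The rndis_wlan fix above checks create_singlethread_workqueue() for failure and frees the already-allocated wiphy before returning -ENOMEM. A standalone sketch of that error-path cleanup; plain heap buffers stand in for the wiphy and workqueue.

#include <stdio.h>
#include <stdlib.h>

/* Plain heap buffers stand in for the wiphy and the workqueue; the point
 * is only the cleanup order on the failure path. */
struct priv {
	void *wiphy;
	void *workqueue;
};

static int demo_bind(struct priv *p)
{
	p->wiphy = malloc(128);
	if (!p->wiphy)
		return -1;

	p->workqueue = malloc(256);
	if (!p->workqueue) {
		/* Failure after a successful allocation: release what we
		 * already hold before reporting the error. */
		free(p->wiphy);
		p->wiphy = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct priv p = { 0 };
	int ret = demo_bind(&p);

	printf("bind: %d\n", ret);
	free(p.workqueue);
	free(p.wiphy);
	return 0;
}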
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9bee3f11898a..869411f55d88 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1196,8 +1196,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
index d94bd90f64da..1be4c652f465 100644
--- a/drivers/net/wireless/wcnss/wcnss_vreg.c
+++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -194,27 +194,6 @@ int validate_iris_chip_id(u32 reg)
}
}
-static void wcnss_free_regulator(void)
-{
- int vreg_i;
-
- /* Free pronto voltage regulators from device node */
- for (vreg_i = 0; vreg_i < PRONTO_REGULATORS; vreg_i++) {
- if (pronto_vregs[vreg_i].state) {
- regulator_put(pronto_vregs[vreg_i].regulator);
- pronto_vregs[vreg_i].state = VREG_NULL_CONFIG;
- }
- }
-
- /* Free IRIS voltage regulators from device node */
- for (vreg_i = 0; vreg_i < IRIS_REGULATORS; vreg_i++) {
- if (iris_vregs[vreg_i].state) {
- regulator_put(iris_vregs[vreg_i].regulator);
- iris_vregs[vreg_i].state = VREG_NULL_CONFIG;
- }
- }
-}
-
static int
wcnss_dt_parse_vreg_level(struct device *dev, int index,
const char *current_vreg_name, const char *vreg_name,
@@ -257,13 +236,14 @@ wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
/* Parse pronto voltage regulators from device node */
for (vreg_i = 0; vreg_i < PRONTO_REGULATORS; vreg_i++) {
pronto_vregs[vreg_i].regulator =
- regulator_get(dev, pronto_vregs[vreg_i].name);
+ devm_regulator_get_optional(dev,
+ pronto_vregs[vreg_i].name);
if (IS_ERR(pronto_vregs[vreg_i].regulator)) {
if (pronto_vregs[vreg_i].required) {
rc = PTR_ERR(pronto_vregs[vreg_i].regulator);
dev_err(dev, "regulator get of %s failed (%d)\n",
pronto_vregs[vreg_i].name, rc);
- goto wcnss_vreg_get_err;
+ return rc;
} else {
dev_dbg(dev, "Skip optional regulator configuration: %s\n",
pronto_vregs[vreg_i].name);
@@ -271,27 +251,28 @@ wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
}
}
- pronto_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
rc = wcnss_dt_parse_vreg_level(dev, vreg_i,
pronto_vregs[vreg_i].curr,
pronto_vregs[vreg_i].volt,
wlan_config->pronto_vlevel);
if (rc) {
dev_err(dev, "error reading voltage-level property\n");
- goto wcnss_vreg_get_err;
+ return rc;
}
+ pronto_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
}
/* Parse iris voltage regulators from device node */
for (vreg_i = 0; vreg_i < IRIS_REGULATORS; vreg_i++) {
iris_vregs[vreg_i].regulator =
- regulator_get(dev, iris_vregs[vreg_i].name);
+ devm_regulator_get_optional(dev,
+ iris_vregs[vreg_i].name);
if (IS_ERR(iris_vregs[vreg_i].regulator)) {
if (iris_vregs[vreg_i].required) {
rc = PTR_ERR(iris_vregs[vreg_i].regulator);
dev_err(dev, "regulator get of %s failed (%d)\n",
iris_vregs[vreg_i].name, rc);
- goto wcnss_vreg_get_err;
+ return rc;
} else {
dev_dbg(dev, "Skip optional regulator configuration: %s\n",
iris_vregs[vreg_i].name);
@@ -299,22 +280,18 @@ wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
}
}
- iris_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
rc = wcnss_dt_parse_vreg_level(dev, vreg_i,
iris_vregs[vreg_i].curr,
iris_vregs[vreg_i].volt,
wlan_config->iris_vlevel);
if (rc) {
dev_err(dev, "error reading voltage-level property\n");
- goto wcnss_vreg_get_err;
+ return rc;
}
+ iris_vregs[vreg_i].state |= VREG_GET_REGULATOR_MASK;
}
return 0;
-
-wcnss_vreg_get_err:
- wcnss_free_regulator();
- return rc;
}
void wcnss_iris_reset(u32 reg, void __iomem *pmu_conf_reg)
@@ -586,12 +563,6 @@ static void wcnss_vregs_off(struct vregs_info regulators[], uint size,
pr_err("vreg %s disable failed (%d)\n",
regulators[i].name, rc);
}
-
- /* Free the regulator source */
- if (regulators[i].state & VREG_GET_REGULATOR_MASK)
- regulator_put(regulators[i].regulator);
-
- regulators[i].state = VREG_NULL_CONFIG;
}
}
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 04d6b2e6fec1..13ae5c3c2471 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2163,7 +2163,7 @@ static void wcnssctrl_rx_handler(struct work_struct *worker)
return;
}
if (len < sizeof(struct smd_msg_hdr)) {
- pr_err("wcnss: incomplete header available len = %d\n", len);
+ pr_debug("wcnss: incomplete header available len = %d\n", len);
return;
}
@@ -3336,8 +3336,8 @@ static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
return NOTIFY_DONE;
}
- pr_debug("%s: wcnss notification event: %lu : %s\n",
- __func__, code, wcnss_subsys_notif_type[code]);
+ pr_info("%s: wcnss notification event: %lu : %s\n",
+ __func__, code, wcnss_subsys_notif_type[code]);
if (code == SUBSYS_PROXY_VOTE) {
if (pdev && pwlanconfig) {
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index eb7a9e62371c..fee4c01fbdfd 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1331,6 +1331,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
+ xenbus_switch_state(dev, XenbusStateInitialising);
return netdev;
exit:
@@ -2023,7 +2024,10 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
+ break;
+
case XenbusStateUnknown:
+ wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
@@ -2154,7 +2158,9 @@ static int xennet_remove(struct xenbus_device *dev)
xenbus_switch_state(dev, XenbusStateClosing);
wait_event(module_unload_q,
xenbus_read_driver_state(dev->otherend) ==
- XenbusStateClosing);
+ XenbusStateClosing ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
wait_event(module_unload_q,
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index af62c4c854f3..b4f31dad40d6 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -17,7 +17,7 @@
*/
#include <linux/module.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include <linux/firmware.h>
#include <linux/nfc.h>
#include <net/nfc/nci.h>
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index a7faa0bcc01e..fc8e78a29d77 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -96,10 +96,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv,
/* Send the SPI packet */
err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion,
skb);
- if (err != 0) {
+ if (err)
nfc_err(priv->dev, "spi_send failed %d", err);
- kfree_skb(skb);
- }
+
return err;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d6ceb8b91cd6..1c8aedf21370 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2976,10 +2976,16 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
mutex_unlock(&dev->shutdown_lock);
}
-static void nvme_dev_remove(struct nvme_dev *dev)
+static void nvme_remove_namespaces(struct nvme_dev *dev)
{
struct nvme_ns *ns, *next;
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list)
+ nvme_ns_remove(ns);
+}
+
+static void nvme_dev_remove(struct nvme_dev *dev)
+{
if (nvme_io_incapable(dev)) {
/*
* If the device is not capable of IO (surprise hot-removal,
@@ -2989,8 +2995,7 @@ static void nvme_dev_remove(struct nvme_dev *dev)
*/
nvme_dev_shutdown(dev);
}
- list_for_each_entry_safe(ns, next, &dev->namespaces, list)
- nvme_ns_remove(ns);
+ nvme_remove_namespaces(dev);
}
static int nvme_setup_prp_pools(struct nvme_dev *dev)
@@ -3174,7 +3179,7 @@ static void nvme_probe_work(struct work_struct *work)
*/
if (dev->online_queues < 2) {
dev_warn(dev->dev, "IO queues not created\n");
- nvme_dev_remove(dev);
+ nvme_remove_namespaces(dev);
} else {
nvme_unfreeze_queues(dev);
nvme_dev_add(dev);
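The nvme hunk above factors the namespace teardown into nvme_remove_namespaces(), which walks the list with the _safe iterator because entries are removed while iterating. A standalone sketch of the same remove-while-walking pattern on a plain singly linked list; the types here are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* A plain singly linked list stands in for dev->namespaces; the cached
 * 'next' pointer plays the role of list_for_each_entry_safe()'s second
 * cursor, so freeing the current node is safe. */
struct ns {
	int id;
	struct ns *next;
};

static void remove_namespaces(struct ns **head)
{
	struct ns *n = *head;

	while (n) {
		struct ns *next = n->next;	/* saved before the free */

		printf("removing ns %d\n", n->id);
		free(n);
		n = next;
	}
	*head = NULL;
}

int main(void)
{
	struct ns *head = NULL;
	int i;

	for (i = 3; i >= 1; i--) {
		struct ns *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}
	remove_namespaces(&head);	/* prints ns 1, 2, 3 */
	return 0;
}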
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 97a280d50d6d..7c509bff9295 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -223,7 +223,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
str[i] = '_';
}
- return tsize;
+ return repend;
}
EXPORT_SYMBOL_GPL(of_device_get_modalias);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 78530d1714dc..bdce0679674c 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards {
netmos_9901,
netmos_9865,
quatech_sppxp100,
+ wch_ch382l,
};
@@ -2708,6 +2709,7 @@ static struct parport_pc_pci {
/* netmos_9901 */ { 1, { { 0, -1 }, } },
/* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
+ /* wch_ch382l */ { 1, { { 2, -1 }, } },
};
static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+ /* WCH CH382L PCI-E single parallel port card */
+ { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 0aa81bd3de12..fb682e8af74d 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -179,14 +179,16 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
}
/* interrupt controller is in a child node */
- *np_temp = of_find_node_by_name(np_pcie, controller);
+ *np_temp = of_get_child_by_name(np_pcie, controller);
if (!(*np_temp)) {
dev_err(dev, "Node for %s is absent\n", controller);
goto out;
}
temp = of_irq_count(*np_temp);
- if (!temp)
+ if (!temp) {
+ of_node_put(*np_temp);
goto out;
+ }
if (temp > max_host_irqs)
dev_warn(dev, "Too many %s interrupts defined %u\n",
(legacy ? "legacy" : "MSI"), temp);
@@ -200,6 +202,9 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
if (!host_irqs[temp])
break;
}
+
+ of_node_put(*np_temp);
+
if (temp) {
*num_irqs = temp;
ret = 0;
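The keystone fix above pairs of_get_child_by_name() with of_node_put() on every exit path, including the early return when no interrupts are found. A standalone refcounting sketch of that balanced get/put discipline; the counter and helper names are illustrative, not the OF API.

#include <stdio.h>

/* The counter is only illustrative; in the driver it is the kobject
 * reference taken by of_get_child_by_name(). */
struct node {
	int refcount;
};

static struct node *get_child(struct node *n)
{
	n->refcount++;
	return n;
}

static void put_node(struct node *n)
{
	n->refcount--;
}

static int parse_irqs(struct node *parent, int irq_count)
{
	struct node *child = get_child(parent);

	if (!irq_count) {
		put_node(child);	/* early exit still drops the reference */
		return -1;
	}

	/* ... map irq_count interrupts here ... */

	put_node(child);		/* normal exit drops it too */
	return 0;
}

int main(void)
{
	struct node parent = { .refcount = 1 };

	parse_irqs(&parent, 0);
	parse_irqs(&parent, 4);
	printf("refcount after parsing: %d\n", parent.refcount);	/* still 1 */
	return 0;
}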
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 0ecf64f4afe9..b180e67acafb 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2421,6 +2421,13 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
break;
}
+ if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
+ (!dev->res[base_sel - 1].resource)) {
+ PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
+ dev->rc_idx);
+ break;
+ }
+
PCIE_DBG_FS(dev,
"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
dev->res[base_sel - 1].name,
@@ -2440,6 +2447,13 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
break;
case 13: /* dump all registers of base_sel */
+ if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
+ (!dev->res[base_sel - 1].resource)) {
+ PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
+ dev->rc_idx);
+ break;
+ }
+
if (!base_sel) {
PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
break;
@@ -6798,7 +6812,7 @@ static int msm_pcie_pm_suspend(struct pci_dev *dev,
return ret;
}
-static void msm_pcie_fixup_suspend(struct pci_dev *dev)
+static void msm_pcie_fixup_suspend_late(struct pci_dev *dev)
{
int ret;
struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
@@ -6830,8 +6844,8 @@ static void msm_pcie_fixup_suspend(struct pci_dev *dev)
mutex_unlock(&pcie_dev->recovery_lock);
}
-DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
- msm_pcie_fixup_suspend);
+DECLARE_PCI_FIXUP_SUSPEND_LATE(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+ msm_pcie_fixup_suspend_late);
/* Resume the PCIe link */
static int msm_pcie_pm_resume(struct pci_dev *dev,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0b3e0bfa7be5..572ca192cb1f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
unsigned long long sta = 0;
struct acpiphp_func *func;
+ u32 dvid;
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
if (ACPI_SUCCESS(status) && sta)
break;
} else {
- u32 dvid;
-
- pci_bus_read_config_dword(slot->bus,
- PCI_DEVFN(slot->device,
- func->function),
- PCI_VENDOR_ID, &dvid);
- if (dvid != 0xffffffff) {
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, func->function),
+ &dvid, 0)) {
sta = ACPI_STA_ALL;
break;
}
}
}
+ if (!sta) {
+ /*
+ * Check for the slot itself since it may be that the
+ * ACPI slot is a device below PCIe upstream port so in
+ * that case it may not even be reachable yet.
+ */
+ if (pci_bus_read_dev_vendor_id(slot->bus,
+ PCI_DEVFN(slot->device, 0), &dvid, 0)) {
+ sta = ACPI_STA_ALL;
+ }
+ }
+
return (unsigned int)sta;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 193ac13de49b..566897f24dee 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -230,7 +230,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
- mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+ mask64 = PCI_ROM_ADDRESS_MASK;
}
if (res->flags & IORESOURCE_MEM_64) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 254192b5dad1..4eb1cf0ed00c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3631,6 +3631,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
+ quirk_dma_func1_alias);
/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
PCI_DEVICE_ID_JMICRON_JMB388_ESD,
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 25062966cbfa..8b2f8b2a574e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
} else if (resno == PCI_ROM_RESOURCE) {
- mask = (u32)PCI_ROM_ADDRESS_MASK;
+ mask = PCI_ROM_ADDRESS_MASK;
} else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 39400dda27c2..d6d671a925e1 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -323,10 +323,16 @@ validate_group(struct perf_event *event)
return 0;
}
+static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
+{
+ struct platform_device *pdev = armpmu->plat_device;
+
+ return pdev ? dev_get_platdata(&pdev->dev) : NULL;
+}
+
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
- struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -338,8 +344,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
- plat_device = armpmu->plat_device;
- plat = dev_get_platdata(&plat_device->dev);
+
+ plat = armpmu_get_platdata(armpmu);
start_clock = sched_clock();
if (plat && plat->handle_irq)
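The arm_pmu hunk above routes platform-data access through a helper that returns NULL when no platform device is attached, instead of dereferencing a missing pdev. A standalone sketch of that NULL-tolerant accessor, with simplified stand-in types.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in types; only the NULL check in the accessor matters. */
struct platdata {
	void (*handle_irq)(void);
};

struct pdev {
	struct platdata *pdata;
};

struct pmu {
	struct pdev *plat_device;
};

static struct platdata *get_platdata(struct pmu *p)
{
	/* Tolerate a PMU with no platform device instead of crashing. */
	return p->plat_device ? p->plat_device->pdata : NULL;
}

int main(void)
{
	struct platdata pd = { .handle_irq = NULL };
	struct pdev dev = { .pdata = &pd };
	struct pmu without_pdev = { .plat_device = NULL };
	struct pmu with_pdev = { .plat_device = &dev };

	printf("no pdev  -> %p\n", (void *)get_platdata(&without_pdev));
	printf("has pdev -> %p\n", (void *)get_platdata(&with_pdev));
	return 0;
}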
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 2686a4450dfc..f4639a9f1e48 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -979,19 +979,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p,
EXPORT_SYMBOL_GPL(pinctrl_lookup_state);
/**
- * pinctrl_select_state() - select/activate/program a pinctrl state to HW
+ * pinctrl_commit_state() - select/activate/program a pinctrl state to HW
* @p: the pinctrl handle for the device that requests configuration
* @state: the state handle to select/activate/program
*/
-int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
+static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
struct pinctrl_state *old_state = p->state;
int ret;
- if (p->state == state)
- return 0;
-
if (p->state) {
/*
* For each pinmux setting in the old state, forget SW's record
@@ -1055,6 +1052,19 @@ unapply_new_state:
return ret;
}
+
+/**
+ * pinctrl_select_state() - select/activate/program a pinctrl state to HW
+ * @p: the pinctrl handle for the device that requests configuration
+ * @state: the state handle to select/activate/program
+ */
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
+{
+ if (p->state == state)
+ return 0;
+
+ return pinctrl_commit_state(p, state);
+}
EXPORT_SYMBOL_GPL(pinctrl_select_state);
static void devm_pinctrl_release(struct device *dev, void *res)
@@ -1223,7 +1233,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
{
if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
- return pinctrl_select_state(pctldev->p, pctldev->hog_sleep);
+ return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep);
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
@@ -1235,7 +1245,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
int pinctrl_force_default(struct pinctrl_dev *pctldev)
{
if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
- return pinctrl_select_state(pctldev->p, pctldev->hog_default);
+ return pinctrl_commit_state(pctldev->p, pctldev->hog_default);
return 0;
}
EXPORT_SYMBOL_GPL(pinctrl_force_default);
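The pinctrl refactor above keeps the early "already in this state" return in pinctrl_select_state() but has pinctrl_force_sleep()/pinctrl_force_default() call the new pinctrl_commit_state() directly, so forced transitions are always reprogrammed. A standalone sketch of that split, with state handling heavily simplified and an int standing in for the state handle.

#include <stdio.h>

/* An int stands in for the pinctrl_state handle and a printf for the
 * hardware programming; only the select/commit split is the point. */
struct ctrl {
	int state;
};

static int commit_state(struct ctrl *c, int state)
{
	printf("programming hardware for state %d\n", state);
	c->state = state;
	return 0;
}

static int select_state(struct ctrl *c, int state)
{
	if (c->state == state)
		return 0;		/* normal path keeps the shortcut */
	return commit_state(c, state);
}

static int force_sleep(struct ctrl *c, int sleep_state)
{
	/* Forced transitions bypass the shortcut and always reprogram. */
	return commit_state(c, sleep_state);
}

int main(void)
{
	struct ctrl c = { .state = 1 };

	select_state(&c, 1);	/* no output: already in state 1 */
	force_sleep(&c, 1);	/* still reprograms state 1 */
	return 0;
}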
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index 1b580ba76453..907d7db3fcee 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */
SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */
SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */
- SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 92430f781eb7..a0b8c8a8c323 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -59,12 +59,14 @@ static int send_command(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
int ret;
+ int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
if (ec_dev->proto_version > 2)
- ret = ec_dev->pkt_xfer(ec_dev, msg);
+ xfer_fxn = ec_dev->pkt_xfer;
else
- ret = ec_dev->cmd_xfer(ec_dev, msg);
+ xfer_fxn = ec_dev->cmd_xfer;
+ ret = (*xfer_fxn)(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
struct cros_ec_command *status_msg;
@@ -87,7 +89,7 @@ static int send_command(struct cros_ec_device *ec_dev,
for (i = 0; i < EC_COMMAND_RETRIES; i++) {
usleep_range(10000, 11000);
- ret = ec_dev->cmd_xfer(ec_dev, status_msg);
+ ret = (*xfer_fxn)(ec_dev, status_msg);
if (ret < 0)
break;
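The cros_ec_proto change above picks the transfer callback (pkt_xfer vs cmd_xfer) once, based on the protocol version, and reuses the same pointer for the EC_RES_IN_PROGRESS status polls. A standalone sketch of that function-pointer selection, with simplified stand-in types and no real EC protocol.

#include <stdio.h>

/* Simplified stand-in types; there is no real EC protocol here, only the
 * choice and reuse of the transfer callback. */
struct ec_dev {
	int proto_version;
	int (*pkt_xfer)(struct ec_dev *ec, const char *msg);
	int (*cmd_xfer)(struct ec_dev *ec, const char *msg);
};

static int demo_pkt_xfer(struct ec_dev *ec, const char *msg)
{
	(void)ec;
	printf("pkt_xfer: %s\n", msg);
	return 0;
}

static int demo_cmd_xfer(struct ec_dev *ec, const char *msg)
{
	(void)ec;
	printf("cmd_xfer: %s\n", msg);
	return 0;
}

static int send_command(struct ec_dev *ec, const char *msg)
{
	int (*xfer_fxn)(struct ec_dev *ec, const char *msg);
	int ret;

	/* Pick the transport once ... */
	xfer_fxn = (ec->proto_version > 2) ? ec->pkt_xfer : ec->cmd_xfer;

	ret = xfer_fxn(ec, msg);

	/* ... and any in-progress polling calls the same xfer_fxn again
	 * rather than hard-coding cmd_xfer. */
	return ret;
}

int main(void)
{
	struct ec_dev ec = {
		.proto_version = 3,
		.pkt_xfer = demo_pkt_xfer,
		.cmd_xfer = demo_cmd_xfer,
	};

	return send_command(&ec, "version");
}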
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf9973989..24f1630a8b3f 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -187,7 +187,7 @@ static ssize_t show_ec_version(struct device *dev,
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: EC error %d\n", msg->result);
else {
- msg->data[sizeof(msg->data) - 1] = '\0';
+ msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: %s\n", msg->data);
}
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index fd1452e28352..df3f5c301a61 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -35,8 +35,10 @@
#define PIPE_REG_ADDRESS 0x10 /* write: physical address */
#define PIPE_REG_ADDRESS_HIGH 0x34 /* write: physical address */
#define PIPE_REG_WAKES 0x14 /* read: wake flags */
-#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
-#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
+#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address
+ */
+#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address
+ */
#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
#define PIPE_REG_VERSION 0x24 /* read: device version */
@@ -53,12 +55,16 @@
/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
- is possible */
+ * is possible
+ */
#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
- * is possible */
+ * is possible
+ */
-/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
+/* Possible status values used to signal errors -
+ * see goldfish_pipe_error_convert
+ */
#define PIPE_ERROR_INVAL -1
#define PIPE_ERROR_AGAIN -2
#define PIPE_ERROR_NOMEM -3
@@ -71,14 +77,6 @@
#define MAX_PAGES_TO_GRAB 32
-#define DEBUG 0
-
-#if DEBUG
-#define DPRINT(...) { printk(KERN_ERR __VA_ARGS__); }
-#else
-#define DPRINT(...)
-#endif
-
/* This data type models a given pipe instance */
struct goldfish_pipe {
struct goldfish_pipe_dev *dev;
@@ -158,6 +156,7 @@ static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
{
u32 aph, apl;
u64 paddr;
+
aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
@@ -174,7 +173,8 @@ static int setup_access_params_addr(struct platform_device *pdev,
u64 paddr;
struct access_params *aps;
- aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
+ aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params),
+ GFP_KERNEL);
if (!aps)
return -1;
@@ -226,7 +226,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
unsigned long address, address_end;
- struct page* pages[MAX_PAGES_TO_GRAB] = {};
+ struct page *pages[MAX_PAGES_TO_GRAB] = {};
int count = 0, ret = -EINVAL;
/* If the emulator already closed the pipe, no need to go further */
@@ -268,17 +268,17 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
ret = get_user_pages_fast(first_page, requested_pages,
!is_write, pages);
- DPRINT("%s: requested pages: %d %d %p\n", __FUNCTION__,
- ret, requested_pages, first_page);
+ pr_debug("%s: requested pages: %d %ld %p\n", __func__, ret,
+ requested_pages, (void*)first_page);
if (ret == 0) {
- DPRINT("%s: error: (requested pages == 0) (wanted %d)\n",
- __FUNCTION__, requested_pages);
+ pr_err("%s: error: (requested pages == 0) (wanted %ld)\n",
+ __func__, requested_pages);
mutex_unlock(&pipe->lock);
return ret;
}
if (ret < 0) {
- DPRINT("%s: (requested pages < 0) %d \n",
- __FUNCTION__, requested_pages);
+ pr_err("%s: (requested pages < 0) %ld \n",
+ __func__, requested_pages);
mutex_unlock(&pipe->lock);
return ret;
}
@@ -293,8 +293,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
xaddr_prev = xaddr_i;
num_contiguous_pages++;
} else {
- DPRINT("%s: discontinuous page boundary: %d pages instead\n",
- __FUNCTION__, page_i);
+ pr_err("%s: discontinuous page boundary: %d pages instead\n",
+ __func__, page_i);
break;
}
}
@@ -345,8 +345,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* ABI relies on this behavior.
*/
if (status != PIPE_ERROR_AGAIN)
- pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
- status, is_write ? "write" : "read");
+ pr_err_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
+ status, is_write ? "write" : "read");
ret = 0;
break;
}
@@ -506,7 +506,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
struct goldfish_pipe *pipe;
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
int32_t status;
/* Allocate new pipe kernel object */
@@ -516,8 +516,9 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
pipe->dev = dev;
mutex_init(&pipe->lock);
- DPRINT("%s: call. pipe_dev pipe_dev=0x%lx new_pipe_addr=0x%lx file=0x%lx\n", __FUNCTION__, pipe_dev, pipe, file);
- // spin lock init, write head of list, i guess
+ pr_debug("%s: call. pipe_dev dev=%p new_pipe_addr=%p file=%p\n",
+ __func__, dev, pipe, file);
+ /* spin lock init, write head of list, i guess */
init_waitqueue_head(&pipe->wake_queue);
/*
@@ -540,7 +541,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
struct goldfish_pipe *pipe = filp->private_data;
- DPRINT("%s: call. pipe=0x%lx file=0x%lx\n", __FUNCTION__, pipe, filp);
+ pr_debug("%s: call. pipe=%p file=%p\n", __func__, pipe, filp);
/* The guest is closing the channel, so tell the emulator right now */
goldfish_cmd(pipe, CMD_CLOSE);
kfree(pipe);
@@ -557,7 +558,7 @@ static const struct file_operations goldfish_pipe_fops = {
.release = goldfish_pipe_release,
};
-static struct miscdevice goldfish_pipe_dev = {
+static struct miscdevice goldfish_pipe_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "goldfish_pipe",
.fops = &goldfish_pipe_fops,
@@ -565,15 +566,16 @@ static struct miscdevice goldfish_pipe_dev = {
int goldfish_pipe_device_init_v1(struct platform_device *pdev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
- int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
- IRQF_SHARED, "goldfish_pipe", dev);
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
+ int err = devm_request_irq(&pdev->dev, dev->irq,
+ goldfish_pipe_interrupt, IRQF_SHARED, "goldfish_pipe", dev);
+
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ for v1\n");
return err;
}
- err = misc_register(&goldfish_pipe_dev);
+ err = misc_register(&goldfish_pipe_miscdev);
if (err) {
dev_err(&pdev->dev, "unable to register v1 device\n");
return err;
@@ -585,5 +587,5 @@ int goldfish_pipe_device_init_v1(struct platform_device *pdev)
void goldfish_pipe_device_deinit_v1(struct platform_device *pdev)
{
- misc_deregister(&goldfish_pipe_dev);
+ misc_deregister(&goldfish_pipe_miscdev);
}
diff --git a/drivers/platform/goldfish/goldfish_pipe.h b/drivers/platform/goldfish/goldfish_pipe.h
index 9b75a51dba24..e24bef314468 100644
--- a/drivers/platform/goldfish/goldfish_pipe.h
+++ b/drivers/platform/goldfish/goldfish_pipe.h
@@ -84,8 +84,14 @@ struct goldfish_pipe_dev {
/* v1-specific access parameters */
struct access_params *aps;
+
+ /* ptr to platform device's device struct */
+ struct device *pdev_dev;
+
+ /* DMA info */
+ size_t dma_alloc_total;
};
-extern struct goldfish_pipe_dev pipe_dev[1];
+extern struct goldfish_pipe_dev goldfish_pipe_dev;
#endif /* GOLDFISH_PIPE_H */
diff --git a/drivers/platform/goldfish/goldfish_pipe_v2.c b/drivers/platform/goldfish/goldfish_pipe_v2.c
index ad373ed36555..90bac4b055a3 100644
--- a/drivers/platform/goldfish/goldfish_pipe_v2.c
+++ b/drivers/platform/goldfish/goldfish_pipe_v2.c
@@ -46,20 +46,29 @@
* exchange is properly mapped during a transfer.
*/
+#include <linux/printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <uapi/linux/goldfish/goldfish_dma.h>
#include "goldfish_pipe.h"
-
/*
* Update this when something changes in the driver's behavior so the host
* can benefit from knowing it
+ * Notes:
+ * version 2 was an intermediate release and isn't supported anymore.
+ * version 3 is goldfish_pipe_v2 without DMA support.
+ * version 4 (current) is goldfish_pipe_v2 with DMA support.
*/
enum {
- PIPE_DRIVER_VERSION = 2,
+ PIPE_DRIVER_VERSION = 4,
PIPE_CURRENT_DEVICE_VERSION = 2
};
-/*
- * IMPORTANT: The following constants must match the ones used and defined
+/* IMPORTANT: The following constants must match the ones used and defined
* in external/qemu/hw/goldfish_pipe.c in the Android source tree.
*/
@@ -70,7 +79,10 @@ enum PipePollFlags {
PIPE_POLL_HUP = 1 << 2
};
-/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
+/*
+ * Possible status values used to signal errors - see
+ * goldfish_pipe_error_convert
+ */
enum PipeErrors {
PIPE_ERROR_INVAL = -1,
PIPE_ERROR_AGAIN = -2,
@@ -80,9 +92,9 @@ enum PipeErrors {
/* Bit-flags used to signal events from the emulator */
enum PipeWakeFlags {
- PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
- PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */
- PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */
+ PIPE_WAKE_CLOSED = BIT(0), /* emulator closed pipe */
+ PIPE_WAKE_READ = BIT(1), /* pipe can now be read from */
+ PIPE_WAKE_WRITE = BIT(2), /* pipe can now be written to */
};
/* Bit flags for the 'flags' field */
@@ -117,16 +129,20 @@ enum PipeCmdCode {
PIPE_CMD_WAKE_ON_READ,
/*
- * TODO(zyy): implement a deferred read/write execution to allow parallel
- * processing of pipe operations on the host.
+ * TODO(zyy): implement a deferred read/write execution to allow
+ * parallel processing of pipe operations on the host.
*/
PIPE_CMD_WAKE_ON_DONE_IO,
+ PIPE_CMD_DMA_HOST_MAP,
+ PIPE_CMD_DMA_HOST_UNMAP,
};
enum {
MAX_BUFFERS_PER_COMMAND = 336,
MAX_SIGNALLED_PIPES = 64,
- INITIAL_PIPES_CAPACITY = 64
+ INITIAL_PIPES_CAPACITY = 64,
+ DMA_REGION_MIN_SIZE = PAGE_SIZE,
+ DMA_REGION_MAX_SIZE = 256 << 20
};
struct goldfish_pipe_dev;
@@ -135,18 +151,27 @@ struct goldfish_pipe_command;
/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
- s32 cmd; /* PipeCmdCode, guest -> host */
- s32 id; /* pipe id, guest -> host */
- s32 status; /* command execution status, host -> guest */
+ s32 cmd; /* PipeCmdCode, guest -> host */
+ s32 id; /* pipe id, guest -> host */
+ s32 status; /* command execution status, host -> guest */
s32 reserved; /* to pad to 64-bit boundary */
union {
/* Parameters for PIPE_CMD_{READ,WRITE} */
struct {
- u32 buffers_count; /* number of buffers, guest -> host */
- s32 consumed_size; /* number of consumed bytes, host -> guest */
- u64 ptrs[MAX_BUFFERS_PER_COMMAND]; /* buffer pointers, guest -> host */
- u32 sizes[MAX_BUFFERS_PER_COMMAND]; /* buffer sizes, guest -> host */
+ /* number of buffers, guest -> host */
+ u32 buffers_count;
+ /* number of consumed bytes, host -> guest */
+ s32 consumed_size;
+ /* buffer pointers, guest -> host */
+ u64 ptrs[MAX_BUFFERS_PER_COMMAND];
+ /* buffer sizes, guest -> host */
+ u32 sizes[MAX_BUFFERS_PER_COMMAND];
} rw_params;
+ /* Parameters for PIPE_CMD_DMA_HOST_(UN)MAP */
+ struct {
+ u64 dma_paddr;
+ u64 sz;
+ } dma_maphost_params;
};
};
@@ -165,52 +190,89 @@ struct open_command_param {
/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
struct open_command_param open_command_params;
- struct signalled_pipe_buffer signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
+ struct signalled_pipe_buffer
+ signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
+};
+
+/*
+ * The main data structure tracking state is
+ * struct goldfish_dma_context, which is included
+ * as an extra pointer field in struct goldfish_pipe.
+ * Each such context is associated with possibly
+ * one physical address and size describing the
+ * allocated DMA region, and only one allocation
+ * is allowed for each pipe fd. Further allocations
+ * require more open()'s of pipe fd's.
+ */
+struct goldfish_dma_context {
+ struct device *pdev_dev; /* pointer to feed to dma_*_coherent */
+ void *dma_vaddr; /* kernel vaddr of dma region */
+ size_t dma_size; /* size of dma region */
+ dma_addr_t phys_begin; /* paddr of dma region */
+ dma_addr_t phys_end; /* paddr of dma region + dma_size */
};
/* This data type models a given pipe instance */
struct goldfish_pipe {
- u32 id; /* pipe ID - index into goldfish_pipe_dev::pipes array */
- unsigned long flags; /* The wake flags pipe is waiting for
- * Note: not protected with any lock, uses atomic operations
- * and barriers to make it thread-safe.
- */
- unsigned long signalled_flags; /* wake flags host have signalled,
- * - protected by goldfish_pipe_dev::lock */
+ /* pipe ID - index into goldfish_pipe_dev::pipes array */
+ u32 id;
- struct goldfish_pipe_command *command_buffer; /* A pointer to command buffer */
+ /* The wake flags pipe is waiting for.
+ * Note: not protected with any lock, uses atomic operations and
+ * barriers to make it thread-safe.
+ */
+ unsigned long flags;
- /* doubly linked list of signalled pipes, protected by goldfish_pipe_dev::lock */
+ /* wake flags host have signalled,
+ * protected by goldfish_pipe_dev::lock
+ */
+ unsigned long signalled_flags;
+
+ /* A pointer to command buffer */
+ struct goldfish_pipe_command *command_buffer;
+
+ /* doubly linked list of signalled pipes,
+ * protected by goldfish_pipe_dev::lock
+ */
struct goldfish_pipe *prev_signalled;
struct goldfish_pipe *next_signalled;
/*
* A pipe's own lock. Protects the following:
- * - *command_buffer - makes sure a command can safely write its parameters
- * to the host and read the results back.
+ * - *command_buffer - makes sure a command can safely write its
+ * parameters to the host and read the results back.
*/
struct mutex lock;
- wait_queue_head_t wake_queue; /* A wake queue for sleeping until host signals an event */
- struct goldfish_pipe_dev *dev; /* Pointer to the parent goldfish_pipe_dev instance */
+ /* A wake queue for sleeping until host signals an event */
+ wait_queue_head_t wake_queue;
+ /* Pointer to the parent goldfish_pipe_dev instance */
+ struct goldfish_pipe_dev *dev;
+ /* Holds information about reserved DMA region for this pipe */
+ struct goldfish_dma_context *dma;
};
-struct goldfish_pipe_dev pipe_dev[1] = {};
+struct goldfish_pipe_dev goldfish_pipe_dev;
-static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+static int goldfish_pipe_cmd_locked(
+ struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
pipe->command_buffer->cmd = cmd;
- pipe->command_buffer->status = PIPE_ERROR_INVAL; /* failure by default */
+ /* failure by default */
+ pipe->command_buffer->status = PIPE_ERROR_INVAL;
writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
return pipe->command_buffer->status;
}
-static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
int status;
+
if (mutex_lock_interruptible(&pipe->lock))
return PIPE_ERROR_IO;
- status = goldfish_cmd_locked(pipe, cmd);
+
+ status = goldfish_pipe_cmd_locked(pipe, cmd);
+
mutex_unlock(&pipe->lock);
return status;
}
@@ -235,10 +297,12 @@ static int goldfish_pipe_error_convert(int status)
static int pin_user_pages(unsigned long first_page, unsigned long last_page,
unsigned last_page_size, int is_write,
- struct page *pages[MAX_BUFFERS_PER_COMMAND], unsigned *iter_last_page_size)
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned *iter_last_page_size)
{
int ret;
int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
+
if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
requested_pages = MAX_BUFFERS_PER_COMMAND;
*iter_last_page_size = PAGE_SIZE;
@@ -260,10 +324,11 @@ static void release_user_pages(struct page **pages, int pages_count,
int is_write, s32 consumed_size)
{
int i;
+
for (i = 0; i < pages_count; i++) {
- if (!is_write && consumed_size > 0) {
+ if (!is_write && consumed_size > 0)
set_page_dirty(pages[i]);
- }
+
put_page(pages[i]);
}
}
@@ -291,7 +356,9 @@ static void populate_rw_params(
command->rw_params.sizes[0] = size_on_page;
for (; i < pages_count; ++i) {
xaddr = page_to_phys(pages[i]);
- size_on_page = (i == pages_count - 1) ? iter_last_page_size : PAGE_SIZE;
+ size_on_page = (i == pages_count - 1) ?
+ iter_last_page_size : PAGE_SIZE;
+
if (xaddr == xaddr_prev + PAGE_SIZE) {
command->rw_params.sizes[buffer_idx] += size_on_page;
} else {
@@ -304,10 +371,10 @@ static void populate_rw_params(
command->rw_params.buffers_count = buffer_idx + 1;
}
-static int transfer_max_buffers(struct goldfish_pipe* pipe,
+static int transfer_max_buffers(struct goldfish_pipe *pipe,
unsigned long address, unsigned long address_end, int is_write,
unsigned long last_page, unsigned int last_page_size,
- s32* consumed_size, int* status)
+ s32 *consumed_size, int *status)
{
struct page *pages[MAX_BUFFERS_PER_COMMAND];
unsigned long first_page = address & PAGE_MASK;
@@ -327,26 +394,27 @@ static int transfer_max_buffers(struct goldfish_pipe* pipe,
pipe->command_buffer);
/* Transfer the data */
- *status = goldfish_cmd_locked(pipe,
- is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
+ *status = goldfish_pipe_cmd_locked(
+ pipe,
+ is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
*consumed_size = pipe->command_buffer->rw_params.consumed_size;
mutex_unlock(&pipe->lock);
release_user_pages(pages, pages_count, is_write, *consumed_size);
-
return 0;
}
static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+ u32 cmdBit = is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ;
+
set_bit(wakeBit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
- (void)goldfish_cmd(pipe,
- is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
+ goldfish_pipe_cmd(pipe, cmdBit);
while (test_bit(wakeBit, &pipe->flags)) {
if (wait_event_interruptible(
@@ -368,6 +436,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
int count = 0, ret = -EINVAL;
unsigned long address, address_end, last_page;
unsigned int last_page_size;
+ struct device *pdev_dev;
/* If the emulator already closed the pipe, no need to go further */
if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
@@ -385,16 +454,21 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
last_page = (address_end - 1) & PAGE_MASK;
last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
+ pdev_dev = pipe->dev->pdev_dev;
+
while (address < address_end) {
s32 consumed_size;
int status;
+
ret = transfer_max_buffers(pipe, address, address_end, is_write,
- last_page, last_page_size, &consumed_size, &status);
+ last_page, last_page_size, &consumed_size, &status);
if (ret < 0)
break;
if (consumed_size > 0) {
- /* No matter what's the status, we've transfered something */
+ /* No matter what the status is, we've transferred
+ * something
+ */
count += consumed_size;
address += consumed_size;
}
@@ -413,8 +487,9 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
* err.
*/
if (status != PIPE_ERROR_AGAIN)
- pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
- status, is_write ? "write" : "read");
+ dev_err_ratelimited(pdev_dev,
+ "goldfish_pipe: backend error %d on %s\n",
+ status, is_write ? "write" : "read");
break;
}
@@ -422,7 +497,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
* If the error is not PIPE_ERROR_AGAIN, or if we are in
* non-blocking mode, just return the error code.
*/
- if (status != PIPE_ERROR_AGAIN || (filp->f_flags & O_NONBLOCK) != 0) {
+ if (status != PIPE_ERROR_AGAIN ||
+ (filp->f_flags & O_NONBLOCK) != 0) {
ret = goldfish_pipe_error_convert(status);
break;
}
@@ -440,7 +516,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
size_t bufflen, loff_t *ppos)
{
- return goldfish_pipe_read_write(filp, buffer, bufflen, /* is_write */ 0);
+ return goldfish_pipe_read_write(filp, buffer, bufflen,
+ /* is_write */ 0);
}
static ssize_t goldfish_pipe_write(struct file *filp,
@@ -460,10 +537,9 @@ static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &pipe->wake_queue, wait);
- status = goldfish_cmd(pipe, PIPE_CMD_POLL);
- if (status < 0) {
+ status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
+ if (status < 0)
return -ERESTARTSYS;
- }
if (status & PIPE_POLL_IN)
mask |= POLLIN | POLLRDNORM;
@@ -477,26 +553,30 @@ static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
return mask;
}
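+/* Mark a pipe as signalled and queue it on the device's signalled-pipes
+ * list. Returns 0 on success (including when the pipe is already queued),
+ * -EINVAL for an out-of-range id, or -ENXIO if no pipe is registered at
+ * that id. Presumably called with dev->lock held, as the _locked suffix
+ * and the interrupt-handler caller suggest.
+ */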
-static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
+static int signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
u32 id, u32 flags)
{
struct goldfish_pipe *pipe;
- BUG_ON(id >= dev->pipes_capacity);
+ if (id >= dev->pipes_capacity)
+ return -EINVAL;
pipe = dev->pipes[id];
if (!pipe)
- return;
+ return -ENXIO;
+
pipe->signalled_flags |= flags;
if (pipe->prev_signalled || pipe->next_signalled
|| dev->first_signalled_pipe == pipe)
- return; /* already in the list */
+ return 0; /* already in the list */
+
pipe->next_signalled = dev->first_signalled_pipe;
- if (dev->first_signalled_pipe) {
+ if (dev->first_signalled_pipe)
dev->first_signalled_pipe->prev_signalled = pipe;
- }
dev->first_signalled_pipe = pipe;
+
+ return 0;
}
static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
@@ -511,21 +591,22 @@ static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
pipe->next_signalled = NULL;
}
-static struct goldfish_pipe *signalled_pipes_pop_front(struct goldfish_pipe_dev *dev,
+static struct goldfish_pipe *signalled_pipes_pop_front(
+ struct goldfish_pipe_dev *dev,
int *wakes)
{
struct goldfish_pipe *pipe;
unsigned long flags;
+
spin_lock_irqsave(&dev->lock, flags);
pipe = dev->first_signalled_pipe;
if (pipe) {
*wakes = pipe->signalled_flags;
pipe->signalled_flags = 0;
- /*
- * This is an optimized version of signalled_pipes_remove_locked() -
- * we want to make it as fast as possible to wake the sleeping pipe
- * operations faster
+ /* This is an optimized version of
+ * signalled_pipes_remove_locked(); we want waking the sleeping
+ * pipe operations to be as fast as possible.
*/
dev->first_signalled_pipe = pipe->next_signalled;
if (dev->first_signalled_pipe)
@@ -539,11 +620,12 @@ static struct goldfish_pipe *signalled_pipes_pop_front(struct goldfish_pipe_dev
static void goldfish_interrupt_task(unsigned long unused)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
/* Iterate over the signalled pipes and wake them one by one */
struct goldfish_pipe *pipe;
int wakes;
- while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
+
+ while ((pipe = signalled_pipes_pop_front(&goldfish_pipe_dev, &wakes)) !=
+ NULL) {
if (wakes & PIPE_WAKE_CLOSED) {
pipe->flags = 1 << BIT_CLOSED_ON_HOST;
} else {
@@ -553,8 +635,8 @@ static void goldfish_interrupt_task(unsigned long unused)
clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
}
/*
- * wake_up_interruptible() implies a write barrier, so don't explicitly
- * add another one here.
+ * wake_up_interruptible() implies a write barrier, so don't
+ * explicitly add another one here.
*/
wake_up_interruptible(&pipe->wake_queue);
}
@@ -580,7 +662,8 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
u32 i;
unsigned long flags;
struct goldfish_pipe_dev *dev = dev_id;
- if (dev != pipe_dev)
+
+ if (dev != &goldfish_pipe_dev)
return IRQ_NONE;
/* Request the signalled pipes from the device */
@@ -608,12 +691,16 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
int id;
+
for (id = 0; id < dev->pipes_capacity; ++id)
if (!dev->pipes[id])
return id;
{
- /* Reallocate the array */
+ /* Reallocate the array.
+ * Since get_free_pipe_id_locked runs with interrupts disabled,
+ * we don't want to make calls that could lead to sleep.
+ */
u32 new_capacity = 2 * dev->pipes_capacity;
struct goldfish_pipe **pipes =
kcalloc(new_capacity, sizeof(*pipes),
@@ -642,13 +729,15 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
+ struct device *pdev_dev;
unsigned long flags;
int id;
int status;
/* Allocate new pipe kernel object */
struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+
if (pipe == NULL)
return -ENOMEM;
@@ -656,13 +745,16 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
mutex_init(&pipe->lock);
init_waitqueue_head(&pipe->wake_queue);
+ pdev_dev = dev->pdev_dev;
+
/*
- * Command buffer needs to be allocated on its own page to make sure it is
- * physically contiguous in host's address space.
+ * The command buffer needs to be allocated on its own page to make sure
+ * it is physically contiguous in the host's address space.
*/
pipe->command_buffer =
- (struct goldfish_pipe_command*)__get_free_page(GFP_KERNEL);
+ (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
if (!pipe->command_buffer) {
+ dev_err(pdev_dev, "Could not alloc pipe command buffer!\n");
status = -ENOMEM;
goto err_pipe;
}
@@ -671,6 +763,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
id = get_free_pipe_id_locked(dev);
if (id < 0) {
+ dev_err(pdev_dev, "Could not get free pipe id!\n");
status = id;
goto err_id_locked;
}
@@ -683,11 +776,18 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
dev->buffers->open_command_params.rw_params_max_count =
MAX_BUFFERS_PER_COMMAND;
dev->buffers->open_command_params.command_buffer_ptr =
- (u64)(unsigned long)__pa(pipe->command_buffer);
- status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
+ (u64)__pa(pipe->command_buffer);
+ status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
spin_unlock_irqrestore(&dev->lock, flags);
- if (status < 0)
+ if (status < 0) {
+ dev_err(pdev_dev,
+ "Could not tell host of new pipe! status=%d\n",
+ status);
goto err_cmd;
+ }
+
+ pipe->dma = NULL;
+
/* All is done, save the pipe into the file's private data field */
file->private_data = pipe;
return 0;
@@ -703,6 +803,55 @@ err_pipe:
return status;
}
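+/* Ask the host to unmap this pipe's DMA region, if one was allocated.
+ * The guest-side coherent buffer itself is freed separately in
+ * goldfish_pipe_dma_release_guest().
+ */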
+static void goldfish_pipe_dma_release_host(struct goldfish_pipe *pipe)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+ struct device *pdev_dev;
+
+ if (!dma)
+ return;
+
+ pdev_dev = pipe->dev->pdev_dev;
+
+ if (dma->dma_vaddr) {
+ dev_dbg(pdev_dev, "Last ref for dma region @ 0x%llx\n",
+ dma->phys_begin);
+
+ pipe->command_buffer->dma_maphost_params.dma_paddr =
+ dma->phys_begin;
+ pipe->command_buffer->dma_maphost_params.sz = dma->dma_size;
+ goldfish_pipe_cmd(pipe, PIPE_CMD_DMA_HOST_UNMAP);
+ }
+
+ dev_dbg(pdev_dev,
+ "after delete of dma @ 0x%llx: alloc total %zu\n",
+ dma->phys_begin, pipe->dev->dma_alloc_total);
+}
+
+static void goldfish_pipe_dma_release_guest(struct goldfish_pipe *pipe)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+ struct device *pdev_dev;
+
+ if (!dma)
+ return;
+
+ pdev_dev = pipe->dev->pdev_dev;
+
+ if (dma->dma_vaddr) {
+ dma_free_coherent(
+ dma->pdev_dev,
+ dma->dma_size,
+ dma->dma_vaddr,
+ dma->phys_begin);
+ pipe->dev->dma_alloc_total -= dma->dma_size;
+
+ dev_dbg(pdev_dev,
+ "after delete of dma @ 0x%llx: alloc total %zu\n",
+ dma->phys_begin, pipe->dev->dma_alloc_total);
+ }
+}
+
static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
unsigned long flags;
@@ -710,7 +859,8 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
struct goldfish_pipe_dev *dev = pipe->dev;
/* The guest is closing the channel, so tell the emulator right now */
- (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);
+ goldfish_pipe_dma_release_host(pipe);
+ goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
spin_lock_irqsave(&dev->lock, flags);
dev->pipes[pipe->id] = NULL;
@@ -718,11 +868,273 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
spin_unlock_irqrestore(&dev->lock, flags);
filp->private_data = NULL;
+
+ /* Even if an fd is duped or involved in a forked process,
+ * open/release methods are called only once, ever.
+ * This makes goldfish_pipe_release a safe point
+ * to delete the DMA region.
+ */
+ goldfish_pipe_dma_release_guest(pipe);
+
+ kfree(pipe->dma);
free_page((unsigned long)pipe->command_buffer);
kfree(pipe);
+
return 0;
}
+/* VMA open/close are for debugging purposes only.
+ * One might think that fork() (and thus pure calls to open())
+ * would require some sort of bookkeeping or refcounting
+ * for dma contexts (incl. when to call dma_free_coherent),
+ * but the |vm_private_data| field and |vma_open/close| are only
+ * for situations where the driver needs to interact with VMAs
+ * directly through its own per-VMA data structure (which does
+ * need to be refcounted).
+ *
+ * Here, we just use the kernel's existing
+ * VMA processing; we don't do anything on our own.
+ * The only reason we would want to do so is if we had to do
+ * special processing for the virtual (not physical) memory
+ * already associated with DMA memory; it is much less related
+ * to the task of knowing when to alloc/dealloc DMA memory.
+ */
+static void goldfish_dma_vma_open(struct vm_area_struct *vma)
+{
+ /* Not used */
+}
+
+static void goldfish_dma_vma_close(struct vm_area_struct *vma)
+{
+ /* Not used */
+}
+
+static const struct vm_operations_struct goldfish_dma_vm_ops = {
+ .open = goldfish_dma_vma_open,
+ .close = goldfish_dma_vma_close,
+};
+
+static bool is_page_size_multiple(unsigned long sz)
+{
+ return !(sz & (PAGE_SIZE - 1));
+}
+
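+/* A DMA region must be a whole number of pages and fall within
+ * [DMA_REGION_MIN_SIZE, DMA_REGION_MAX_SIZE].
+ */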
+static bool check_region_size_valid(size_t size)
+{
+ if (size < DMA_REGION_MIN_SIZE)
+ return false;
+
+ if (size > DMA_REGION_MAX_SIZE)
+ return false;
+
+ return is_page_size_multiple(size);
+}
+
+static int goldfish_pipe_dma_alloc_locked(struct goldfish_pipe *pipe)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+ struct device *pdev_dev = pipe->dev->pdev_dev;
+
+ dev_dbg(pdev_dev, "%s: try alloc dma for pipe %p\n",
+ __func__, pipe);
+
+ if (dma->dma_vaddr) {
+ dev_dbg(pdev_dev, "%s: already alloced, return.\n",
+ __func__);
+ return 0;
+ }
+
+ dma->phys_begin = 0;
+ dma->dma_vaddr =
+ dma_alloc_coherent(
+ dma->pdev_dev,
+ dma->dma_size,
+ &dma->phys_begin,
+ GFP_KERNEL);
+ if (!dma->dma_vaddr)
+ return -ENOMEM;
+
+ dma->phys_end = dma->phys_begin + dma->dma_size;
+ pipe->dev->dma_alloc_total += dma->dma_size;
+
+ dev_dbg(pdev_dev, "%s: got v/p addrs "
+ "%p 0x%llx sz %zu total alloc %zu\n",
+ __func__,
+ dma->dma_vaddr,
+ dma->phys_begin,
+ dma->dma_size,
+ pipe->dev->dma_alloc_total);
+ pipe->command_buffer->dma_maphost_params.dma_paddr = dma->phys_begin;
+ pipe->command_buffer->dma_maphost_params.sz = dma->dma_size;
+ return goldfish_pipe_cmd_locked(pipe, PIPE_CMD_DMA_HOST_MAP);
+}
+
+static int goldfish_dma_mmap_locked(
+ struct goldfish_pipe *pipe, struct vm_area_struct *vma)
+{
+ struct goldfish_dma_context *dma = pipe->dma;
+ struct device *pdev_dev = pipe->dev->pdev_dev;
+ size_t sz_requested = vma->vm_end - vma->vm_start;
+ int status;
+
+ if (!check_region_size_valid(sz_requested)) {
+ dev_err(pdev_dev, "%s: bad size (%zu) requested\n", __func__,
+ sz_requested);
+ return -EINVAL;
+ }
+
+ dev_dbg(pdev_dev, "Mapping dma at 0x%llx\n", dma->phys_begin);
+
+ /* Alloc phys region if not allocated already. */
+ status = goldfish_pipe_dma_alloc_locked(pipe);
+ if (status)
+ return status;
+
+ status =
+ remap_pfn_range(
+ vma,
+ vma->vm_start,
+ dma->phys_begin >> PAGE_SHIFT,
+ sz_requested,
+ vma->vm_page_prot);
+
+ if (status < 0) {
+ dev_err(pdev_dev, "Cannot remap pfn range....\n");
+ return -EAGAIN;
+ }
+
+ vma->vm_ops = &goldfish_dma_vm_ops;
+ dev_dbg(pdev_dev, "goldfish_dma_mmap for host vaddr 0x%llx succeeded\n",
+ dma->phys_begin);
+
+ return 0;
+}
+
+/* When we call mmap() on a pipe fd, we obtain a pointer into
+ * the physically contiguous DMA region of the pipe device
+ * (Goldfish DMA).
+ */
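+/* A rough sketch of the expected userspace flow (illustrative only: the
+ * device node name and the lack of error handling are assumptions; the
+ * ioctl payload is struct goldfish_dma_ioctl_info as used below):
+ *
+ *   int fd = open("/dev/goldfish_pipe", O_RDWR);
+ *   struct goldfish_dma_ioctl_info info = { .size = region_size };
+ *
+ *   ioctl(fd, GOLDFISH_DMA_IOC_CREATE_REGION, &info);
+ *   void *ptr = mmap(NULL, region_size, PROT_READ | PROT_WRITE,
+ *                    MAP_SHARED, fd, 0);
+ *   ioctl(fd, GOLDFISH_DMA_IOC_GETOFF, &info);
+ */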
+static int goldfish_dma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct goldfish_pipe *pipe =
+ (struct goldfish_pipe *)(filp->private_data);
+ int status;
+
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ status = goldfish_dma_mmap_locked(pipe, vma);
+ mutex_unlock(&pipe->lock);
+ return status;
+}
+
+static int goldfish_pipe_dma_create_region(
+ struct goldfish_pipe *pipe, size_t size)
+{
+ struct goldfish_dma_context *dma =
+ kzalloc(sizeof(struct goldfish_dma_context), GFP_KERNEL);
+ struct device *pdev_dev = pipe->dev->pdev_dev;
+
+ if (dma) {
+ if (mutex_lock_interruptible(&pipe->lock)) {
+ kfree(dma);
+ return -ERESTARTSYS;
+ }
+
+ if (pipe->dma) {
+ mutex_unlock(&pipe->lock);
+ kfree(dma);
+ dev_err(pdev_dev, "The DMA region already allocated\n");
+ return -EBUSY;
+ }
+
+ dma->dma_size = size;
+ dma->pdev_dev = pipe->dev->pdev_dev;
+ pipe->dma = dma;
+ mutex_unlock(&pipe->lock);
+ return 0;
+ }
+
+ dev_err(pdev_dev, "Could not allocate DMA context info!\n");
+ return -ENOMEM;
+}
+
+static long goldfish_dma_ioctl_getoff(struct goldfish_pipe *pipe,
+ unsigned long arg)
+{
+ struct device *pdev_dev = pipe->dev->pdev_dev;
+ struct goldfish_dma_ioctl_info ioctl_data;
+ struct goldfish_dma_context *dma;
+
+ BUILD_BUG_ON(FIELD_SIZEOF(struct goldfish_dma_ioctl_info, phys_begin) <
+ FIELD_SIZEOF(struct goldfish_dma_context, phys_begin));
+
+ if (mutex_lock_interruptible(&pipe->lock)) {
+ dev_err(pdev_dev, "DMA_GETOFF: the pipe is not locked\n");
+ return -EACCES;
+ }
+
+ dma = pipe->dma;
+ if (dma) {
+ ioctl_data.phys_begin = dma->phys_begin;
+ ioctl_data.size = dma->dma_size;
+ } else {
+ ioctl_data.phys_begin = 0;
+ ioctl_data.size = 0;
+ }
+
+ if (copy_to_user((void __user *)arg, &ioctl_data,
+ sizeof(ioctl_data))) {
+ mutex_unlock(&pipe->lock);
+ return -EFAULT;
+ }
+
+ dev_dbg(pdev_dev,
+ "DMA_IOC_GETOFF: phys_begin=0x%llx size=%lld\n",
+ ioctl_data.phys_begin, ioctl_data.size);
+
+ mutex_unlock(&pipe->lock);
+ return 0;
+}
+
+static long goldfish_dma_ioctl_create_region(struct goldfish_pipe *pipe,
+ unsigned long arg)
+{
+ struct goldfish_dma_ioctl_info ioctl_data;
+
+ if (copy_from_user(&ioctl_data, (void __user *)arg, sizeof(ioctl_data)))
+ return -EFAULT;
+
+ if (!check_region_size_valid(ioctl_data.size)) {
+ dev_err(pipe->dev->pdev_dev,
+ "DMA_CREATE_REGION: bad size (%lld) requested\n",
+ ioctl_data.size);
+ return -EINVAL;
+ }
+
+ return goldfish_pipe_dma_create_region(pipe, ioctl_data.size);
+}
+
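+/* DMA ioctl entry point: LOCK is currently a no-op acknowledgement,
+ * UNLOCK wakes any waiters on the pipe's wake queue, GETOFF reports the
+ * region's physical address and size, and CREATE_REGION records the
+ * requested region size for a later mmap().
+ */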
+static long goldfish_dma_ioctl(
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct goldfish_pipe *pipe =
+ (struct goldfish_pipe *)(file->private_data);
+
+ switch (cmd) {
+ case GOLDFISH_DMA_IOC_LOCK:
+ return 0;
+ case GOLDFISH_DMA_IOC_UNLOCK:
+ wake_up_interruptible(&pipe->wake_queue);
+ return 0;
+ case GOLDFISH_DMA_IOC_GETOFF:
+ return goldfish_dma_ioctl_getoff(pipe, arg);
+ case GOLDFISH_DMA_IOC_CREATE_REGION:
+ return goldfish_dma_ioctl_create_region(pipe, arg);
+ }
+ return -ENOTTY;
+}
+
static const struct file_operations goldfish_pipe_fops = {
.owner = THIS_MODULE,
.read = goldfish_pipe_read,
@@ -730,9 +1142,13 @@ static const struct file_operations goldfish_pipe_fops = {
.poll = goldfish_pipe_poll,
.open = goldfish_pipe_open,
.release = goldfish_pipe_release,
+ /* DMA-related operations */
+ .mmap = goldfish_dma_mmap,
+ .unlocked_ioctl = goldfish_dma_ioctl,
+ .compat_ioctl = goldfish_dma_ioctl,
};
-static struct miscdevice goldfish_pipe_dev = {
+static struct miscdevice goldfish_pipe_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "goldfish_pipe",
.fops = &goldfish_pipe_fops,
@@ -740,69 +1156,76 @@ static struct miscdevice goldfish_pipe_dev = {
static int goldfish_pipe_device_init_v2(struct platform_device *pdev)
{
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
+ struct device *pdev_dev = &pdev->dev;
char *page;
- struct goldfish_pipe_dev *dev = pipe_dev;
- int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
+ int err;
+
+ err = devm_request_irq(pdev_dev, dev->irq, goldfish_pipe_interrupt,
IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
- dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
+ dev_err(pdev_dev, "unable to allocate IRQ for v2\n");
return err;
}
- err = misc_register(&goldfish_pipe_dev);
+ err = misc_register(&goldfish_pipe_miscdev);
if (err) {
- dev_err(&pdev->dev, "unable to register v2 device\n");
+ dev_err(pdev_dev, "unable to register v2 device\n");
return err;
}
+ dev->pdev_dev = pdev_dev;
dev->first_signalled_pipe = NULL;
dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
- dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), GFP_KERNEL);
+ dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
+ GFP_KERNEL);
if (!dev->pipes)
return -ENOMEM;
/*
* We're going to pass two buffers, open_command_params and
* signalled_pipe_buffers, to the host. This means each of those buffers
- * needs to be contained in a single physical page. The easiest choice is
- * to just allocate a page and place the buffers in it.
+ * needs to be contained in a single physical page. The easiest choice
+ * is to just allocate a page and place the buffers in it.
*/
- BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE);
- page = (char*)__get_free_page(GFP_KERNEL);
+ BUILD_BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE);
+ page = (char *)__get_free_page(GFP_KERNEL);
if (!page) {
kfree(dev->pipes);
return -ENOMEM;
}
- dev->buffers = (struct goldfish_pipe_dev_buffers*)page;
+ dev->buffers = (struct goldfish_pipe_dev_buffers *)page;
/* Send the buffer addresses to the host */
- {
- u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);
- writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
- writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_SIGNAL_BUFFER);
- writel((u32)MAX_SIGNALLED_PIPES, dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
+ gf_write_ptr(&dev->buffers->signalled_pipe_buffers,
+ dev->base + PIPE_REG_SIGNAL_BUFFER,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
+
+ writel((u32)MAX_SIGNALLED_PIPES,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
+
+ gf_write_ptr(&dev->buffers->open_command_params,
+ dev->base + PIPE_REG_OPEN_BUFFER,
+ dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
- paddr = __pa(&dev->buffers->open_command_params);
- writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
- writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_OPEN_BUFFER);
- }
return 0;
}
-static void goldfish_pipe_device_deinit_v2(struct platform_device *pdev) {
- struct goldfish_pipe_dev *dev = pipe_dev;
- misc_deregister(&goldfish_pipe_dev);
- kfree(dev->pipes);
- free_page((unsigned long)dev->buffers);
+static void goldfish_pipe_device_deinit_v2(struct platform_device *pdev)
+{
+ misc_deregister(&goldfish_pipe_miscdev);
+ kfree(goldfish_pipe_dev.pipes);
+ free_page((unsigned long)goldfish_pipe_dev.buffers);
}
static int goldfish_pipe_probe(struct platform_device *pdev)
{
int err;
struct resource *r;
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
+ struct device *pdev_dev = &pdev->dev;
- BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
/* not thread safe, but this should not happen */
WARN_ON(dev->base != NULL);
@@ -811,12 +1234,12 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL || resource_size(r) < PAGE_SIZE) {
- dev_err(&pdev->dev, "can't allocate i/o page\n");
+ dev_err(pdev_dev, "can't allocate i/o page\n");
return -EINVAL;
}
- dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+ dev->base = devm_ioremap(pdev_dev, r->start, PAGE_SIZE);
if (dev->base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
+ dev_err(pdev_dev, "ioremap failed\n");
return -EINVAL;
}
@@ -853,7 +1276,8 @@ error:
static int goldfish_pipe_remove(struct platform_device *pdev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = &goldfish_pipe_dev;
+
if (dev->version < PIPE_CURRENT_DEVICE_VERSION)
goldfish_pipe_device_deinit_v1(pdev);
else
@@ -886,4 +1310,4 @@ static struct platform_driver goldfish_pipe_driver = {
module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
index dd9ea463c2a4..d97340477cf3 100644
--- a/drivers/platform/goldfish/pdev_bus.c
+++ b/drivers/platform/goldfish/pdev_bus.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/goldfish.h>
#define PDEV_BUS_OP_DONE (0x00)
#define PDEV_BUS_OP_REMOVE_DEV (0x04)
@@ -130,10 +131,9 @@ static int goldfish_new_pdev(void)
dev->pdev.dev.dma_mask = (void *)(dev->pdev.name + name_len + 1);
*dev->pdev.dev.dma_mask = ~0;
-#ifdef CONFIG_64BIT
- writel((u32)((u64)name>>32), pdev_bus_base + PDEV_BUS_GET_NAME_HIGH);
-#endif
- writel((u32)(unsigned long)name, pdev_bus_base + PDEV_BUS_GET_NAME);
+ gf_write_ptr(name, pdev_bus_base + PDEV_BUS_GET_NAME,
+ pdev_bus_base + PDEV_BUS_GET_NAME_HIGH);
+
name[name_len] = '\0';
dev->pdev.id = readl(pdev_bus_base + PDEV_BUS_ID);
dev->pdev.resource[0].start = base;
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 2ae2438032b7..20351dd8a492 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1082,13 +1082,14 @@ int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
uint32_t val;
struct gsi_evt_ctx *ctx;
int res;
- int ee = gsi_ctx->per.ee;
+ int ee;
unsigned long flags;
if (!gsi_ctx) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
+ ee = gsi_ctx->per.ee;
if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
GSIERR("bad params props=%p dev_hdl=0x%lx evt_ring_hdl=%p\n",
@@ -1605,7 +1606,7 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
struct gsi_chan_ctx *ctx;
uint32_t val;
int res;
- int ee = gsi_ctx->per.ee;
+ int ee;
enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
uint8_t erindex;
void **user_data;
@@ -1614,6 +1615,7 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
+ ee = gsi_ctx->per.ee;
if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
GSIERR("bad params props=%p dev_hdl=0x%lx chan_hdl=%p\n",
@@ -2208,12 +2210,13 @@ int gsi_query_channel_info(unsigned long chan_hdl,
unsigned long flags;
uint64_t rp;
uint64_t wp;
- int ee = gsi_ctx->per.ee;
+ int ee;
if (!gsi_ctx) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
+ ee = gsi_ctx->per.ee;
if (chan_hdl >= gsi_ctx->max_ch || !info) {
GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
@@ -2278,12 +2281,13 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
unsigned long flags;
uint64_t rp;
uint64_t wp;
- int ee = gsi_ctx->per.ee;
+ int ee;
if (!gsi_ctx) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
+ ee = gsi_ctx->per.ee;
if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
@@ -2464,13 +2468,14 @@ int gsi_poll_channel(unsigned long chan_hdl,
{
struct gsi_chan_ctx *ctx;
uint64_t rp;
- int ee = gsi_ctx->per.ee;
+ int ee;
unsigned long flags;
if (!gsi_ctx) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
+ ee = gsi_ctx->per.ee;
if (chan_hdl >= gsi_ctx->max_ch || !notify) {
GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index f135d3977509..1a704ffab07a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -432,6 +432,8 @@ static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
link) {
+ if (entry->cookie != IPA_HDR_COOKIE)
+ continue;
nbytes = scnprintf(
dbg_buff,
IPA_MAX_MSG_LEN,
@@ -606,6 +608,14 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
if (attrib->protocol_eq_present)
pr_err("protocol:%d ", attrib->protocol_eq);
+ if (attrib->num_ihl_offset_range_16 >
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) {
+ IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+ attrib->num_ihl_offset_range_16);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_range_16; i++) {
pr_err(
"(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
@@ -614,6 +624,12 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->ihl_offset_range_16[i].range_high);
}
+ if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) {
+ IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_32; i++) {
pr_err(
"(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
@@ -635,6 +651,12 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->ihl_offset_eq_16.value);
}
+ if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) {
+ IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) {
pr_err(
"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
@@ -643,6 +665,12 @@ static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->ihl_offset_meq_32[i].value);
}
+ if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) {
+ IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_128; i++) {
for (j = 0; j < 16; j++) {
addr[j] = attrib->offset_meq_128[i].value[j];
@@ -812,11 +840,14 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
u32 rt_tbl_idx;
u32 bitmap;
bool eq;
+ int res = 0;
tbl = &ipa_ctx->glob_flt_tbl[ip];
mutex_lock(&ipa_ctx->lock);
i = 0;
list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->cookie != IPA_FLT_COOKIE)
+ continue;
if (entry->rule.eq_attrib_type) {
rt_tbl_idx = entry->rule.rt_tbl_idx;
bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -835,10 +866,14 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
i, entry->rule.action, rt_tbl_idx);
pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
bitmap, entry->rule.retain_hdr, eq);
- if (eq)
- ipa_attrib_dump_eq(
+ if (eq) {
+ res = ipa_attrib_dump_eq(
&entry->rule.eq_attrib);
- else
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
+ } else
ipa_attrib_dump(
&entry->rule.attrib, ip);
i++;
@@ -848,6 +883,8 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
tbl = &ipa_ctx->flt_tbl[j][ip];
i = 0;
list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->cookie != IPA_FLT_COOKIE)
+ continue;
if (entry->rule.eq_attrib_type) {
rt_tbl_idx = entry->rule.rt_tbl_idx;
bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -867,18 +904,23 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
pr_err("attrib_mask:%08x retain_hdr:%d ",
bitmap, entry->rule.retain_hdr);
pr_err("eq:%d ", eq);
- if (eq)
- ipa_attrib_dump_eq(
- &entry->rule.eq_attrib);
- else
+ if (eq) {
+ res = ipa_attrib_dump_eq(
+ &entry->rule.eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
+ } else
ipa_attrib_dump(
&entry->rule.attrib, ip);
i++;
}
}
+bail:
mutex_unlock(&ipa_ctx->lock);
- return 0;
+ return res;
}
static ssize_t ipa_read_stats(struct file *file, char __user *ubuf,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 826d449edbd2..66c5366ebde3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2115,8 +2115,10 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
goto fail_dma_mapping;
}
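+ /* sys->head_desc_list and sys->len are also manipulated from the
+ * rx handling and cleanup paths, so serialize the update with
+ * sys->spinlock.
+ */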
+ spin_lock_bh(&sys->spinlock);
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
+ spin_unlock_bh(&sys->spinlock);
ret = sps_transfer_one(sys->ep->ep_hdl,
rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2130,8 +2132,10 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
return;
fail_sps_transfer:
+ spin_lock_bh(&sys->spinlock);
list_del(&rx_pkt->link);
rx_len_cached = --sys->len;
+ spin_unlock_bh(&sys->spinlock);
dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
sys->rx_buff_sz, DMA_FROM_DEVICE);
fail_dma_mapping:
@@ -2171,8 +2175,10 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
goto fail_dma_mapping;
}
+ spin_lock_bh(&sys->spinlock);
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
+ spin_unlock_bh(&sys->spinlock);
ret = sps_transfer_one(sys->ep->ep_hdl,
rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2185,9 +2191,11 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
return;
fail_sps_transfer:
+ spin_lock_bh(&sys->spinlock);
rx_len_cached = --sys->len;
list_del(&rx_pkt->link);
INIT_LIST_HEAD(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
sys->rx_buff_sz, DMA_FROM_DEVICE);
fail_dma_mapping:
@@ -2219,7 +2227,9 @@ static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
}
rx_pkt = sys->repl.cache[curr];
+ spin_lock_bh(&sys->spinlock);
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ spin_unlock_bh(&sys->spinlock);
ret = sps_transfer_one(sys->ep->ep_hdl,
rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
@@ -2278,6 +2288,7 @@ static void ipa_cleanup_rx(struct ipa_sys_context *sys)
u32 head;
u32 tail;
+ spin_lock_bh(&sys->spinlock);
list_for_each_entry_safe(rx_pkt, r,
&sys->head_desc_list, link) {
list_del(&rx_pkt->link);
@@ -2295,6 +2306,7 @@ static void ipa_cleanup_rx(struct ipa_sys_context *sys)
sys->free_skb(rx_pkt->data.skb);
kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
+ spin_unlock_bh(&sys->spinlock);
if (sys->repl.cache) {
head = atomic_read(&sys->repl.head_idx);
@@ -2970,6 +2982,7 @@ static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
struct ipa_rx_pkt_wrapper *rx_pkt_expected;
struct sk_buff *rx_skb;
+ spin_lock_bh(&sys->spinlock);
if (unlikely(list_empty(&sys->head_desc_list))) {
WARN_ON(1);
+ spin_unlock_bh(&sys->spinlock);
return;
@@ -2979,6 +2992,7 @@ static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
link);
list_del(&rx_pkt_expected->link);
sys->len--;
+ spin_unlock_bh(&sys->spinlock);
if (size)
rx_pkt_expected->len = size;
rx_skb = rx_pkt_expected->data.skb;
@@ -2999,6 +3013,7 @@ static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
struct ipa_rx_pkt_wrapper *rx_pkt_expected;
struct sk_buff *rx_skb;
+ spin_lock_bh(&sys->spinlock);
if (unlikely(list_empty(&sys->head_desc_list))) {
WARN_ON(1);
+ spin_unlock_bh(&sys->spinlock);
return;
@@ -3008,6 +3023,7 @@ static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
link);
list_del(&rx_pkt_expected->link);
sys->len--;
+ spin_unlock_bh(&sys->spinlock);
if (size)
rx_pkt_expected->len = size;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index e7092e9acbc7..1be68b31656b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -327,6 +327,11 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
size_t tmp;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (init->table_entries == 0) {
IPADBG("Table entries is zero\n");
@@ -572,6 +577,11 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
int ret = 0;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (dma->entries <= 0) {
IPAERR_RL("Invalid number of commands %d\n",
@@ -758,6 +768,16 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
int result;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+ if (!ipa_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("Nat table not initialized\n");
+ return -EPERM;
+ }
+
+ if (ipa_ctx->nat_mem.public_ip_addr) {
+ IPAERR_RL("Public IP addr not assigned and trying to delete\n");
+ return -EPERM;
+ }
+
IPADBG("\n");
if (ipa_ctx->nat_mem.is_tmp_mem) {
IPAERR("using temp memory during nat del\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 4a68b96ca89a..7cc3c380ee71 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -51,6 +51,7 @@ int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
u8 *start;
int pipe_idx;
+ struct ipa_hdr_entry *hdr_entry;
if (buf == NULL) {
memset(tmp, 0, (IPA_RT_FLT_HW_RULE_BUF_SIZE/4));
@@ -74,6 +75,18 @@ int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
}
rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+
+ /* Check that the header entry is still
+ * present in the header table.
+ */
+
+ if (entry->hdr) {
+ hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
+ if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header entry already deleted\n");
+ return -EPERM;
+ }
+ }
if (entry->hdr) {
if (entry->hdr->cookie == IPA_HDR_COOKIE) {
rule_hdr->u.hdr.hdr_offset =
@@ -140,6 +153,8 @@ int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
u8 *start;
int pipe_idx;
+ struct ipa_hdr_entry *hdr_entry;
+ struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;
if (buf == NULL) {
memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
@@ -162,6 +177,24 @@ int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
return -EPERM;
}
rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx;
+ /* Check that the header (or processing context)
+ * entry is still present in its table.
+ */
+
+ if (entry->hdr) {
+ hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
+ if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header entry already deleted\n");
+ return -EPERM;
+ }
+ } else if (entry->proc_ctx) {
+ hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL("Proc header entry already deleted\n");
+ return -EPERM;
+ }
+ }
if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
struct ipa_hdr_proc_ctx_entry *proc_ctx;
@@ -1130,6 +1163,8 @@ int __ipa_del_rt_rule(u32 rule_hdl)
{
struct ipa_rt_entry *entry;
int id;
+ struct ipa_hdr_entry *hdr_entry;
+ struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;
entry = ipa_id_find(rule_hdl);
@@ -1151,6 +1186,24 @@ int __ipa_del_rt_rule(u32 rule_hdl)
return -EINVAL;
}
}
+ /* Check that the header (or processing context)
+ * entry is still present in its table.
+ */
+
+ if (entry->hdr) {
+ hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
+ if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header entry already deleted\n");
+ return -EINVAL;
+ }
+ } else if (entry->proc_ctx) {
+ hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL("Proc header entry already deleted\n");
+ return -EINVAL;
+ }
+ }
if (entry->hdr)
__ipa_release_hdr(entry->hdr->id);
@@ -1463,6 +1516,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
{
struct ipa_rt_entry *entry;
struct ipa_hdr_entry *hdr = NULL;
+ struct ipa_hdr_entry *hdr_entry;
if (rtrule->rule.hdr_hdl) {
hdr = ipa_id_find(rtrule->rule.hdr_hdl);
@@ -1483,6 +1537,17 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
goto error;
}
+ /* Check that the header entry is still
+ * present in the header table.
+ */
+
+ if (entry->hdr) {
+ hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
+ if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header entry already deleted\n");
+ return -EPERM;
+ }
+ }
if (entry->hdr)
entry->hdr->ref_cnt--;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 039a8b6a50b5..3defc03c2571 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1432,6 +1432,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Extended IOCTLs */
case RMNET_IOCTL_EXTENDED:
+ if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
if (copy_from_user(&extend_ioctl_data,
(u8 *)ifr->ifr_ifru.ifru_data,
@@ -2869,7 +2871,7 @@ int rmnet_ipa_query_tethering_stats_modem(
if (reset) {
req->reset_stats_valid = true;
req->reset_stats = true;
- IPAWANERR("reset the pipe stats\n");
+ IPAWANDBG("reset the pipe stats\n");
} else {
/* print tethered-client enum */
IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 6c8b3573465d..eb9a6877c39f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -357,6 +357,8 @@ static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
link) {
+ if (entry->cookie != IPA_HDR_COOKIE)
+ continue;
nbytes = scnprintf(
dbg_buff,
IPA_MAX_MSG_LEN,
@@ -540,6 +542,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
if (attrib->tc_eq_present)
pr_err("tc:%d ", attrib->tc_eq);
+ if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) {
+ IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_128; i++) {
for (j = 0; j < 16; j++) {
addr[j] = attrib->offset_meq_128[i].value[j];
@@ -551,6 +559,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
mask, addr);
}
+ if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) {
+ IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_offset_meq_32; i++)
pr_err(
"(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
@@ -558,6 +572,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->offset_meq_32[i].mask,
attrib->offset_meq_32[i].value);
+ if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) {
+ IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
pr_err(
"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
@@ -572,6 +592,14 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
attrib->metadata_meq32.mask,
attrib->metadata_meq32.value);
+ if (attrib->num_ihl_offset_range_16 >
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) {
+ IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n",
+ IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+ attrib->num_ihl_offset_range_16);
+ return -EPERM;
+ }
+
for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
pr_err(
"(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
@@ -764,7 +792,11 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
pr_err("rule_id:%u prio:%u retain_hdr:%u ",
rules[rl].id, rules[rl].priority,
rules[rl].retain_hdr);
- ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl);
@@ -795,7 +827,11 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
rules[rl].id, rules[rl].priority,
rules[rl].retain_hdr);
- ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("\n");
}
@@ -869,6 +905,7 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
u32 rt_tbl_idx;
u32 bitmap;
bool eq;
+ int res = 0;
mutex_lock(&ipa3_ctx->lock);
@@ -878,6 +915,8 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
tbl = &ipa3_ctx->flt_tbl[j][ip];
i = 0;
list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ if (entry->cookie != IPA_FLT_COOKIE)
+ continue;
if (entry->rule.eq_attrib_type) {
rt_tbl_idx = entry->rule.rt_tbl_idx;
bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
@@ -899,18 +938,23 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ",
entry->rule.hashable, entry->rule_id,
entry->rule.max_prio, entry->prio);
- if (eq)
- ipa3_attrib_dump_eq(
+ if (eq) {
+ res = ipa3_attrib_dump_eq(
&entry->rule.eq_attrib);
- else
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
+ } else
ipa3_attrib_dump(
&entry->rule.attrib, ip);
i++;
}
}
+bail:
mutex_unlock(&ipa3_ctx->lock);
- return 0;
+ return res;
}
static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
@@ -961,7 +1005,11 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
bitmap, rules[rl].rule.retain_hdr);
pr_err("rule_id:%u prio:%u ",
rules[rl].id, rules[rl].priority);
- ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
@@ -985,7 +1033,11 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
bitmap, rules[rl].rule.retain_hdr);
pr_err("rule_id:%u prio:%u ",
rules[rl].id, rules[rl].priority);
- ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+ if (res) {
+ IPAERR_RL("failed read attrib eq\n");
+ goto bail;
+ }
}
pr_err("\n");
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index ced8c8b2d3ab..128b859ee152 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -61,8 +61,10 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
- if (res)
- IPAERR("failed to generate flt h/w rule\n");
+ if (res) {
+ IPAERR_RL("failed to generate flt h/w rule\n");
+ return res;
+ }
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index a78a0a608cb4..5f9cfc208854 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -793,6 +793,12 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
}
memset(&desc, 0, sizeof(desc));
+
+ if (!ipa3_ctx->nat_mem.is_dev_init) {
+ IPAERR_RL("NAT hasn't been initialized\n");
+ return -EPERM;
+ }
+
/* NO-OP IC for ensuring that IPA pipeline is empty */
nop_cmd_pyld =
ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index a10e2cb9ce8b..fd455f72e09e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -50,6 +50,8 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
struct ipa3_rt_entry *entry, u8 *buf)
{
struct ipahal_rt_rule_gen_params gen_params;
+ struct ipa3_hdr_entry *hdr_entry;
+ struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
int res = 0;
memset(&gen_params, 0, sizeof(gen_params));
@@ -69,6 +71,25 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
return -EPERM;
}
+ /* Check that the header (or processing context)
+ * entry is still present in its table.
+ */
+
+ if (entry->hdr) {
+ hdr_entry = ipa3_id_find(entry->rule.hdr_hdl);
+ if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header entry already deleted\n");
+ return -EPERM;
+ }
+ } else if (entry->proc_ctx) {
+ hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL("Proc header entry already deleted\n");
+ return -EPERM;
+ }
+ }
+
if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
@@ -1268,6 +1289,8 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
{
struct ipa3_rt_entry *entry;
int id;
+ struct ipa3_hdr_entry *hdr_entry;
+ struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
entry = ipa3_id_find(rule_hdl);
@@ -1290,6 +1313,25 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
}
}
+ /* Check that the header (or processing context)
+ * entry is still present in its table.
+ */
+
+ if (entry->hdr) {
+ hdr_entry = ipa3_id_find(entry->rule.hdr_hdl);
+ if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header entry already deleted\n");
+ return -EINVAL;
+ }
+ } else if (entry->proc_ctx) {
+ hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL("Proc header entry already deleted\n");
+ return -EINVAL;
+ }
+ }
+
if (entry->hdr)
__ipa3_release_hdr(entry->hdr->id);
else if (entry->proc_ctx)
@@ -1609,7 +1651,8 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
struct ipa3_rt_entry *entry;
struct ipa3_hdr_entry *hdr = NULL;
struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
-
+ struct ipa3_hdr_entry *hdr_entry;
+ struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
if (rtrule->rule.hdr_hdl) {
hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
@@ -1636,6 +1679,25 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
goto error;
}
+ /* Check that the header (or processing context)
+ * entry is still present in its table.
+ */
+
+ if (entry->hdr) {
+ hdr_entry = ipa3_id_find(entry->rule.hdr_hdl);
+ if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header entry already deleted\n");
+ return -EPERM;
+ }
+ } else if (entry->proc_ctx) {
+ hdr_proc_entry = ipa3_id_find(entry->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL("Proc header entry already deleted\n");
+ return -EPERM;
+ }
+ }
+
if (entry->hdr)
entry->hdr->ref_cnt--;
if (entry->proc_ctx)
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index c9e5a46c08f0..900f5077e901 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1566,6 +1566,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Extended IOCTLs */
case RMNET_IOCTL_EXTENDED:
+ if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
if (copy_from_user(&extend_ioctl_data,
(u8 *)ifr->ifr_ifru.ifru_data,
@@ -3013,7 +3015,7 @@ static int rmnet_ipa3_query_tethering_stats_modem(
if (reset) {
req->reset_stats_valid = true;
req->reset_stats = true;
- IPAWANERR("reset the pipe stats\n");
+ IPAWANDBG("reset the pipe stats\n");
} else {
/* print tethered-client enum */
IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1089eaa02b00..988ebe9a6b90 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -95,6 +95,7 @@ config DELL_LAPTOP
tristate "Dell Laptop Extras"
depends on X86
depends on DCDBAS
+ depends on DMI
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
@@ -110,6 +111,7 @@ config DELL_LAPTOP
config DELL_WMI
tristate "Dell WMI extras"
depends on ACPI_WMI
+ depends on DMI
depends on INPUT
depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index a3661cc44f86..0e0403e024c5 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -101,6 +101,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X302UA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X401U",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 5c768c4627d3..78e1bfee698a 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -415,6 +415,7 @@ static struct thermal_device_info *initialize_sensor(int index)
return td_info;
}
+#ifdef CONFIG_PM_SLEEP
/**
* mid_thermal_resume - resume routine
* @dev: device structure
@@ -442,6 +443,7 @@ static int mid_thermal_suspend(struct device *dev)
*/
return configure_adc(0);
}
+#endif
static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
mid_thermal_suspend, mid_thermal_resume);
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 89aa976f0ab2..65b0a4845ddd 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -52,7 +52,9 @@ struct tc1100_data {
u32 jogdial;
};
+#ifdef CONFIG_PM
static struct tc1100_data suspend_data;
+#endif
/* --------------------------------------------------------------------------
Device Management
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 91fdeaf67037..8017e9a05eb2 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -159,6 +159,7 @@ config BATTERY_SBS
config BATTERY_BQ27XXX
tristate "BQ27xxx battery driver"
+ depends on I2C || I2C=n
help
Say Y here to enable support for batteries with BQ27xxx (I2C/HDQ) chips.
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 880233ce9343..6c3a447f378b 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -285,7 +285,7 @@ static u8 bq27421_regs[] = {
0x18, /* AP */
};
-static u8 *bq27xxx_regs[] = {
+static u8 *bq27xxx_regs[] __maybe_unused = {
[BQ27000] = bq27000_regs,
[BQ27010] = bq27010_regs,
[BQ27500] = bq27500_regs,
@@ -991,7 +991,7 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
schedule_delayed_work(&di->work, 0);
}
-static int bq27xxx_powersupply_init(struct bq27xxx_device_info *di,
+static int __maybe_unused bq27xxx_powersupply_init(struct bq27xxx_device_info *di,
const char *name)
{
int ret;
@@ -1026,7 +1026,7 @@ static int bq27xxx_powersupply_init(struct bq27xxx_device_info *di,
return 0;
}
-static void bq27xxx_powersupply_unregister(struct bq27xxx_device_info *di)
+static void __maybe_unused bq27xxx_powersupply_unregister(struct bq27xxx_device_info *di)
{
/*
* power_supply_unregister call bq27xxx_battery_get_property which
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index dfe1ee89f7c7..922a86787c5c 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -30,9 +30,9 @@ static inline unsigned int get_irq_flags(struct resource *res)
static struct device *dev;
static struct pda_power_pdata *pdata;
static struct resource *ac_irq, *usb_irq;
-static struct timer_list charger_timer;
-static struct timer_list supply_timer;
-static struct timer_list polling_timer;
+static struct delayed_work charger_work;
+static struct delayed_work polling_work;
+static struct delayed_work supply_work;
static int polling;
static struct power_supply *pda_psy_ac, *pda_psy_usb;
@@ -140,7 +140,7 @@ static void update_charger(void)
}
}
-static void supply_timer_func(unsigned long unused)
+static void supply_work_func(struct work_struct *work)
{
if (ac_status == PDA_PSY_TO_CHANGE) {
ac_status = new_ac_status;
@@ -161,11 +161,12 @@ static void psy_changed(void)
* Okay, charger set. Now wait a bit before notifying supplicants,
* charge power should stabilize.
*/
- mod_timer(&supply_timer,
- jiffies + msecs_to_jiffies(pdata->wait_for_charger));
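+ /* cancel + schedule approximates mod_timer(): a pending work item,
+ * if any, is cancelled and the work is re-queued with the new delay.
+ */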
+ cancel_delayed_work(&supply_work);
+ schedule_delayed_work(&supply_work,
+ msecs_to_jiffies(pdata->wait_for_charger));
}
-static void charger_timer_func(unsigned long unused)
+static void charger_work_func(struct work_struct *work)
{
update_status();
psy_changed();
@@ -184,13 +185,14 @@ static irqreturn_t power_changed_isr(int irq, void *power_supply)
* Wait a bit before reading ac/usb line status and setting charger,
* because ac/usb status readings may lag from irq.
*/
- mod_timer(&charger_timer,
- jiffies + msecs_to_jiffies(pdata->wait_for_status));
+ cancel_delayed_work(&charger_work);
+ schedule_delayed_work(&charger_work,
+ msecs_to_jiffies(pdata->wait_for_status));
return IRQ_HANDLED;
}
-static void polling_timer_func(unsigned long unused)
+static void polling_work_func(struct work_struct *work)
{
int changed = 0;
@@ -211,8 +213,9 @@ static void polling_timer_func(unsigned long unused)
if (changed)
psy_changed();
- mod_timer(&polling_timer,
- jiffies + msecs_to_jiffies(pdata->polling_interval));
+ cancel_delayed_work(&polling_work);
+ schedule_delayed_work(&polling_work,
+ msecs_to_jiffies(pdata->polling_interval));
}
#if IS_ENABLED(CONFIG_USB_PHY)
@@ -250,8 +253,9 @@ static int otg_handle_notification(struct notifier_block *nb,
* Wait a bit before reading ac/usb line status and setting charger,
* because ac/usb status readings may lag from irq.
*/
- mod_timer(&charger_timer,
- jiffies + msecs_to_jiffies(pdata->wait_for_status));
+ cancel_delayed_work(&charger_work);
+ schedule_delayed_work(&charger_work,
+ msecs_to_jiffies(pdata->wait_for_status));
return NOTIFY_OK;
}
@@ -300,8 +304,8 @@ static int pda_power_probe(struct platform_device *pdev)
if (!pdata->ac_max_uA)
pdata->ac_max_uA = 500000;
- setup_timer(&charger_timer, charger_timer_func, 0);
- setup_timer(&supply_timer, supply_timer_func, 0);
+ INIT_DELAYED_WORK(&charger_work, charger_work_func);
+ INIT_DELAYED_WORK(&supply_work, supply_work_func);
ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac");
usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb");
@@ -385,9 +389,10 @@ static int pda_power_probe(struct platform_device *pdev)
if (polling) {
dev_dbg(dev, "will poll for status\n");
- setup_timer(&polling_timer, polling_timer_func, 0);
- mod_timer(&polling_timer,
- jiffies + msecs_to_jiffies(pdata->polling_interval));
+ INIT_DELAYED_WORK(&polling_work, polling_work_func);
+ cancel_delayed_work(&polling_work);
+ schedule_delayed_work(&polling_work,
+ msecs_to_jiffies(pdata->polling_interval));
}
if (ac_irq || usb_irq)
@@ -433,9 +438,9 @@ static int pda_power_remove(struct platform_device *pdev)
free_irq(ac_irq->start, pda_psy_ac);
if (polling)
- del_timer_sync(&polling_timer);
- del_timer_sync(&charger_timer);
- del_timer_sync(&supply_timer);
+ cancel_delayed_work_sync(&polling_work);
+ cancel_delayed_work_sync(&charger_work);
+ cancel_delayed_work_sync(&supply_work);
if (pdata->is_usb_online)
power_supply_unregister(pda_psy_usb);
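The pda_power conversion above swaps raw kernel timers for delayed work, so the debounce callbacks now run in process context, and the cancel-then-schedule pairs reproduce the restart behaviour that mod_timer() used to give. A compressed sketch of the same pattern, using hypothetical demo_* names rather than the driver's:

/*
 * timer_list -> delayed_work conversion pattern, reduced to one work item.
 * All demo_* names and DEMO_DELAY_MS are made up for illustration.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_DELAY_MS	500

static struct delayed_work demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* runs in process context, so it may sleep (a timer callback may not) */
}

static void demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_work_fn);
}

static void demo_kick(void)
{
	/* cancel + schedule mirrors the old mod_timer() restart semantics */
	cancel_delayed_work(&demo_work);
	schedule_delayed_work(&demo_work, msecs_to_jiffies(DEMO_DELAY_MS));
}

static void demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);	/* replaces del_timer_sync() */
}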
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 2e9ff2afcba2..1974d6ee032b 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -305,6 +305,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(pd_voltage_max),
POWER_SUPPLY_ATTR(pd_voltage_min),
POWER_SUPPLY_ATTR(sdp_current_max),
+ POWER_SUPPLY_ATTR(fcc_stepper_enable),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/qcom/lpm-stats.c b/drivers/power/qcom/lpm-stats.c
index 90458d6a7212..46056255e5c0 100644
--- a/drivers/power/qcom/lpm-stats.c
+++ b/drivers/power/qcom/lpm-stats.c
@@ -682,9 +682,10 @@ static void cleanup_stats(struct lpm_stats *stats)
{
struct list_head *centry = NULL;
struct lpm_stats *pos = NULL;
+ struct lpm_stats *n = NULL;
centry = &stats->child;
- list_for_each_entry_reverse(pos, centry, sibling) {
+ list_for_each_entry_safe_reverse(pos, n, centry, sibling) {
if (!list_empty(&pos->child)) {
cleanup_stats(pos);
continue;
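The lpm-stats change above moves to the safe reverse iterator, which caches the next element before the loop body runs; that matters whenever the body frees or unlinks the current entry, which the plain iterator cannot tolerate. A small sketch of the pattern with hypothetical demo_* names:

/*
 * list_for_each_entry_safe_reverse() keeps a lookahead pointer (n) so the
 * body may delete pos without breaking the walk.
 * demo_node and demo_cleanup are made-up names for illustration.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head sibling;
};

static void demo_cleanup(struct list_head *head)
{
	struct demo_node *pos, *n;

	list_for_each_entry_safe_reverse(pos, n, head, sibling) {
		list_del(&pos->sibling);
		kfree(pos);	/* safe: the iterator already saved n */
	}
}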
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 68640e349765..6d5308b3dd0b 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -40,12 +40,14 @@
#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER"
#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER"
struct pl_data {
int pl_mode;
int slave_pct;
int taper_pct;
int slave_fcc_ua;
+ int main_fcc_ua;
int restricted_current;
bool restricted_charging_enabled;
struct votable *fcc_votable;
@@ -58,6 +60,8 @@ struct pl_data {
struct delayed_work status_change_work;
struct work_struct pl_disable_forever_work;
struct delayed_work pl_taper_work;
+ struct delayed_work pl_awake_work;
+ struct delayed_work fcc_step_update_work;
struct power_supply *main_psy;
struct power_supply *pl_psy;
struct power_supply *batt_psy;
@@ -65,6 +69,13 @@ struct pl_data {
int charge_type;
int total_settled_ua;
int pl_settled_ua;
+ int fcc_step_update;
+ int main_step_fcc_dir;
+ int main_step_fcc_count;
+ int main_step_fcc_residual;
+ int parallel_step_fcc_dir;
+ int parallel_step_fcc_count;
+ int parallel_step_fcc_residual;
struct class qcom_batt_class;
struct wakeup_source *pl_ws;
struct notifier_block nb;
@@ -379,6 +390,10 @@ done:
* FCC *
**********/
#define EFFICIENCY_PCT 80
+#define FCC_STEP_SIZE_UA 100000
+#define FCC_STEP_UPDATE_DELAY_MS 1000
+#define STEP_UP 1
+#define STEP_DOWN -1
static void get_fcc_split(struct pl_data *chip, int total_ua,
int *master_ua, int *slave_ua)
{
@@ -431,6 +446,43 @@ static void get_fcc_split(struct pl_data *chip, int total_ua,
*slave_ua = (*slave_ua * chip->taper_pct) / 100;
}
+static void get_fcc_step_update_params(struct pl_data *chip, int main_fcc_ua,
+ int parallel_fcc_ua)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ /* Read current FCC of main charger */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get main charger current fcc, rc=%d\n", rc);
+ return;
+ }
+ chip->main_fcc_ua = pval.intval;
+
+ chip->main_step_fcc_dir = (main_fcc_ua > pval.intval) ?
+ STEP_UP : STEP_DOWN;
+ chip->main_step_fcc_count = abs((main_fcc_ua - pval.intval) /
+ FCC_STEP_SIZE_UA);
+ chip->main_step_fcc_residual = (main_fcc_ua - pval.intval) %
+ FCC_STEP_SIZE_UA;
+
+ chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
+ STEP_UP : STEP_DOWN;
+ chip->parallel_step_fcc_count = abs((parallel_fcc_ua -
+ chip->slave_fcc_ua) / FCC_STEP_SIZE_UA);
+ chip->parallel_step_fcc_residual = (parallel_fcc_ua -
+ chip->slave_fcc_ua) % FCC_STEP_SIZE_UA;
+
+ pr_debug("Main FCC Stepper parameters: main_step_direction: %d, main_step_count: %d, main_residual_fcc: %d\n",
+ chip->main_step_fcc_dir, chip->main_step_fcc_count,
+ chip->main_step_fcc_residual);
+ pr_debug("Parallel FCC Stepper parameters: parallel_step_direction: %d, parallel_step_count: %d, parallel_residual_fcc: %d\n",
+ chip->parallel_step_fcc_dir, chip->parallel_step_fcc_count,
+ chip->parallel_step_fcc_residual);
+}
+
static int pl_fcc_vote_callback(struct votable *votable, void *data,
int total_fcc_ua, const char *client)
{
@@ -444,80 +496,305 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
if (!chip->main_psy)
return 0;
+ if (!chip->batt_psy) {
+ chip->batt_psy = power_supply_get_by_name("battery");
+ if (!chip->batt_psy)
+ return 0;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't read FCC step update status, rc=%d\n",
+ rc);
+ return rc;
+ }
+ chip->fcc_step_update = pval.intval;
+ pr_debug("FCC Stepper %s\n",
+ pval.intval ? "enabled" : "disabled");
+ }
+
+ if (chip->fcc_step_update)
+ cancel_delayed_work_sync(&chip->fcc_step_update_work);
+
+
if (chip->pl_mode == POWER_SUPPLY_PL_NONE
|| get_effective_result_locked(chip->pl_disable_votable)) {
+ if (chip->fcc_step_update) {
+ vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
+ true, 0);
+ get_fcc_step_update_params(chip, total_fcc_ua, 0);
+ schedule_delayed_work(&chip->fcc_step_update_work, 0);
+
+ return 0;
+ }
pval.intval = total_fcc_ua;
rc = power_supply_set_property(chip->main_psy,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
&pval);
if (rc < 0)
pr_err("Couldn't set main fcc, rc=%d\n", rc);
+
return rc;
}
if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
get_fcc_split(chip, total_fcc_ua,
&master_fcc_ua, &slave_fcc_ua);
+ if (chip->fcc_step_update) {
+ vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
+ true, 0);
+ get_fcc_step_update_params(chip, master_fcc_ua,
+ slave_fcc_ua);
+ schedule_delayed_work(&chip->fcc_step_update_work, 0);
+ } else {
+ /*
+ * If there is an increase in slave share
+ * (Also handles parallel enable case)
+ * Set Main ICL then slave FCC
+ * else
+ * (Also handles parallel disable case)
+ * Set slave ICL then main FCC.
+ */
+ if (slave_fcc_ua > chip->slave_fcc_ua) {
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
- /*
- * If there is an increase in slave share
- * (Also handles parallel enable case)
- * Set Main ICL then slave FCC
- * else
- * (Also handles parallel disable case)
- * Set slave ICL then main FCC.
- */
- if (slave_fcc_ua > chip->slave_fcc_ua) {
- pval.intval = master_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
&pval);
- if (rc < 0) {
- pr_err("Could not set main fcc, rc=%d\n", rc);
- return rc;
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->slave_fcc_ua = slave_fcc_ua;
+ } else {
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->slave_fcc_ua = slave_fcc_ua;
+
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n",
+ rc);
+ return rc;
+ }
}
+ }
+ }
+
+ pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+ master_fcc_ua, slave_fcc_ua,
+ (master_fcc_ua * 100) / total_fcc_ua,
+ (slave_fcc_ua * 100) / total_fcc_ua);
+
+ return 0;
+}
+
+static void fcc_step_update_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work,
+ struct pl_data, fcc_step_update_work.work);
+ union power_supply_propval pval = {0, };
+ int reschedule_ms = 0, rc = 0;
+ int main_fcc = chip->main_fcc_ua;
+ int parallel_fcc = chip->slave_fcc_ua;
+
+ if (!chip->usb_psy) {
+ chip->usb_psy = power_supply_get_by_name("usb");
+ if (!chip->usb_psy) {
+ pr_err("Couldn't get usb psy\n");
+ return;
+ }
+ }
+
+ /* Check whether USB is present or not */
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get USB Present status, rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * If USB is not present, then disable parallel and
+ * Main FCC to the effective value of FCC votable and exit.
+ */
+ if (!pval.intval) {
+ /* Disable parallel */
+ parallel_fcc = 0;
+ pval.intval = 1;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0)
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+
+ main_fcc = get_effective_result_locked(chip->fcc_votable);
+ pval.intval = main_fcc;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+ return;
+ }
+
+ goto stepper_exit;
+ }
+
+ if (chip->main_step_fcc_count) {
+ main_fcc += (FCC_STEP_SIZE_UA * chip->main_step_fcc_dir);
+ chip->main_step_fcc_count--;
+ reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+ } else if (chip->main_step_fcc_residual) {
+ main_fcc += chip->main_step_fcc_residual;
+ chip->main_step_fcc_residual = 0;
+ }
+
+ if (chip->parallel_step_fcc_count) {
+ parallel_fcc += (FCC_STEP_SIZE_UA *
+ chip->parallel_step_fcc_dir);
+ chip->parallel_step_fcc_count--;
+ reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+ } else if (chip->parallel_step_fcc_residual) {
+ parallel_fcc += chip->parallel_step_fcc_residual;
+ chip->parallel_step_fcc_residual = 0;
+ }
+
+ if (chip->pl_mode == POWER_SUPPLY_PL_NONE ||
+ get_effective_result_locked(chip->pl_disable_votable)) {
+ /* Set Parallel FCC */
+ pval.intval = parallel_fcc;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel charger fcc, rc=%d\n",
+ rc);
+ return;
+ }
- pval.intval = slave_fcc_ua;
+ /* Set main FCC */
+ pval.intval = main_fcc;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+ return;
+ }
+
+ if (parallel_fcc < MINIMUM_PARALLEL_FCC_UA) {
+ pval.intval = 1;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ return;
+ }
+ }
+ } else {
+ if (parallel_fcc < chip->slave_fcc_ua) {
+ pval.intval = parallel_fcc;
rc = power_supply_set_property(chip->pl_psy,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
&pval);
if (rc < 0) {
- pr_err("Couldn't set parallel fcc, rc=%d\n",
- rc);
- return rc;
+ pr_err("Couldn't set parallel charger fcc, rc=%d\n",
+ rc);
+ return;
}
- chip->slave_fcc_ua = slave_fcc_ua;
+ pval.intval = main_fcc;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set main charger fcc, rc=%d\n",
+ rc);
+ return;
+ }
} else {
- pval.intval = slave_fcc_ua;
- rc = power_supply_set_property(chip->pl_psy,
+ pval.intval = main_fcc;
+ rc = power_supply_set_property(chip->main_psy,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
&pval);
if (rc < 0) {
- pr_err("Couldn't set parallel fcc, rc=%d\n",
- rc);
- return rc;
+ pr_err("Couldn't set main charger fcc, rc=%d\n",
+ rc);
+ return;
}
- chip->slave_fcc_ua = slave_fcc_ua;
-
- pval.intval = master_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
+ pval.intval = parallel_fcc;
+ rc = power_supply_set_property(chip->pl_psy,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
&pval);
if (rc < 0) {
- pr_err("Could not set main fcc, rc=%d\n", rc);
- return rc;
+ pr_err("Couldn't set parallel charger fcc, rc=%d\n",
+ rc);
+ return;
+ }
+ }
+
+ rc = power_supply_get_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get slave suspend status, rc=%d\n",
+ rc);
+ return;
+ }
+
+ /*
+ * Enable parallel charger only if it was disabled earlier and
+ * configured slave fcc is greater than or equal to 100mA.
+ */
+ if (pval.intval == 1 && parallel_fcc >= 100000) {
+ pval.intval = 0;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ return;
}
+
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) ||
+ (chip->pl_mode ==
+ POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+ split_settled(chip);
}
}
- pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
- master_fcc_ua, slave_fcc_ua,
- (master_fcc_ua * 100) / total_fcc_ua,
- (slave_fcc_ua * 100) / total_fcc_ua);
+stepper_exit:
+ chip->main_fcc_ua = main_fcc;
+ chip->slave_fcc_ua = parallel_fcc;
- return 0;
+ if (reschedule_ms) {
+ schedule_delayed_work(&chip->fcc_step_update_work,
+ msecs_to_jiffies(reschedule_ms));
+ pr_debug("Rescheduling FCC_STEPPER work\n");
+ } else {
+ vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0);
+ }
}
#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
@@ -640,6 +917,14 @@ static void pl_disable_forever_work(struct work_struct *work)
vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
+static void pl_awake_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work,
+ struct pl_data, pl_awake_work.work);
+
+ vote(chip->pl_awake_votable, PL_VOTER, false, 0);
+}
+
static int pl_disable_vote_callback(struct votable *votable,
void *data, int pl_disable, const char *client)
{
@@ -651,7 +936,15 @@ static int pl_disable_vote_callback(struct votable *votable,
chip->total_settled_ua = 0;
chip->pl_settled_ua = 0;
+ /* Cancel FCC step change work */
+ cancel_delayed_work_sync(&chip->fcc_step_update_work);
+
if (!pl_disable) { /* enable */
+ /* keep system awake to talk to slave charger through i2c */
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ if (chip->pl_awake_votable)
+ vote(chip->pl_awake_votable, PL_VOTER, true, 0);
+
rc = power_supply_get_property(chip->pl_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
if (rc == -ENODEV) {
@@ -671,16 +964,19 @@ static int pl_disable_vote_callback(struct votable *votable,
* PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
* is seen.
*/
- pval.intval = 0;
- rc = power_supply_set_property(chip->pl_psy,
+ if (!chip->fcc_step_update) {
+ pval.intval = 0;
+ rc = power_supply_set_property(chip->pl_psy,
POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
- if (rc < 0)
- pr_err("Couldn't change slave suspend state rc=%d\n",
- rc);
+ if (rc < 0)
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
- if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
- || (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
- split_settled(chip);
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) ||
+ (chip->pl_mode ==
+ POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+ split_settled(chip);
+ }
/*
* we could have been enabled while in taper mode,
* start the taper work if so
@@ -701,17 +997,25 @@ static int pl_disable_vote_callback(struct votable *votable,
|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
split_settled(chip);
- /* pl_psy may be NULL while in the disable branch */
- if (chip->pl_psy) {
- pval.intval = 1;
- rc = power_supply_set_property(chip->pl_psy,
+ if (!chip->fcc_step_update) {
+ /* pl_psy may be NULL while in the disable branch */
+ if (chip->pl_psy) {
+ pval.intval = 1;
+ rc = power_supply_set_property(chip->pl_psy,
POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
- if (rc < 0)
- pr_err("Couldn't change slave suspend state rc=%d\n",
- rc);
+ if (rc < 0)
+ pr_err("Couldn't change slave suspend state rc=%d\n",
+ rc);
+ }
}
+
rerun_election(chip->fcc_votable);
rerun_election(chip->fv_votable);
+
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ if (chip->pl_awake_votable)
+ schedule_delayed_work(&chip->pl_awake_work,
+ msecs_to_jiffies(5000));
}
pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
@@ -1098,6 +1402,8 @@ int qcom_batt_init(void)
INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+ INIT_DELAYED_WORK(&chip->pl_awake_work, pl_awake_work);
+ INIT_DELAYED_WORK(&chip->fcc_step_update_work, fcc_step_update_work);
rc = pl_register_notifier(chip);
if (rc < 0) {
@@ -1151,6 +1457,8 @@ void qcom_batt_deinit(void)
cancel_delayed_work_sync(&chip->status_change_work);
cancel_delayed_work_sync(&chip->pl_taper_work);
cancel_work_sync(&chip->pl_disable_forever_work);
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ cancel_delayed_work_sync(&chip->fcc_step_update_work);
power_supply_unreg_notifier(&chip->nb);
destroy_votable(chip->pl_enable_votable_indirect);
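Putting the stepper pieces above together: get_fcc_step_update_params() splits the distance between the present FCC and the target into a direction, a count of 100 mA steps and a sub-100 mA residual, and fcc_step_update_work() then applies one step per FCC_STEP_UPDATE_DELAY_MS until the counts and residuals are exhausted. A standalone arithmetic sketch (userspace, with invented currents, not driver code):

/*
 * Stepper arithmetic only: direction, whole 100 mA steps, and residual.
 * The demo values (2.0 A -> 2.55 A) are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define FCC_STEP_SIZE_UA	100000
#define STEP_UP			1
#define STEP_DOWN		(-1)

int main(void)
{
	int current_ua = 2000000;	/* FCC the charger is set to now */
	int target_ua  = 2550000;	/* FCC requested by the votable */

	int dir      = (target_ua > current_ua) ? STEP_UP : STEP_DOWN;
	int count    = abs((target_ua - current_ua) / FCC_STEP_SIZE_UA);	/* 5 */
	int residual = (target_ua - current_ua) % FCC_STEP_SIZE_UA;		/* 50000 */

	while (count--)
		current_ua += FCC_STEP_SIZE_UA * dir;	/* one step per pass */
	current_ua += residual;

	printf("settled at %d uA\n", current_ua);	/* 2550000 */
	return 0;
}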
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 8d8118745684..a546621d0837 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2066,12 +2066,21 @@ static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
static int fg_adjust_recharge_soc(struct fg_chip *chip)
{
+ union power_supply_propval prop = {0, };
int rc, msoc, recharge_soc, new_recharge_soc = 0;
bool recharge_soc_status;
if (!chip->dt.auto_recharge_soc)
return 0;
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ if (rc < 0) {
+ pr_err("Error in getting battery health, rc=%d\n", rc);
+ return rc;
+ }
+ chip->health = prop.intval;
+
recharge_soc = chip->dt.recharge_soc_thr;
recharge_soc_status = chip->recharge_soc_adjusted;
/*
@@ -2102,6 +2111,9 @@ static int fg_adjust_recharge_soc(struct fg_chip *chip)
if (!chip->recharge_soc_adjusted)
return 0;
+ if (chip->health != POWER_SUPPLY_HEALTH_GOOD)
+ return 0;
+
/* Restore the default value */
new_recharge_soc = recharge_soc;
chip->recharge_soc_adjusted = false;
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 4beaddff47b3..8e57bf9d2c31 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -325,6 +325,9 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
+ chg->fcc_stepper_mode = of_property_read_bool(node,
+ "qcom,fcc-stepping-enable");
+
return 0;
}
@@ -941,6 +944,7 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_RERUN_AICL,
POWER_SUPPLY_PROP_DP_DM,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -1049,6 +1053,9 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
rc = smblib_get_prop_batt_charge_counter(chg, val);
break;
+ case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE:
+ val->intval = chg->fcc_stepper_mode;
+ break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
@@ -1829,7 +1836,8 @@ static int smb2_chg_config_init(struct smb2 *chip)
switch (pmic_rev_id->pmic_subtype) {
case PMI8998_SUBTYPE:
chip->chg.smb_version = PMI8998_SUBTYPE;
- chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT;
+ chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT
+ | TYPEC_PBS_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V1P1_REV4) /* PMI rev 1.1 */
chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V2P0_REV4) /* PMI rev 2.0 */
@@ -1844,7 +1852,8 @@ static int smb2_chg_config_init(struct smb2 *chip)
break;
case PM660_SUBTYPE:
chip->chg.smb_version = PM660_SUBTYPE;
- chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA;
+ chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA | OV_IRQ_WA_BIT
+ | TYPEC_PBS_WA_BIT;
chg->param.freq_buck = pm660_params.freq_buck;
chg->param.freq_boost = pm660_params.freq_boost;
chg->chg_freq.freq_5V = 650;
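The qpnp-smb2 hunks above cache the devicetree flag qcom,fcc-stepping-enable in fcc_stepper_mode and publish it as POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, which is how battery.c learns whether to ramp FCC in steps. A reduced sketch of that plumbing, with hypothetical demo_* names:

/*
 * DT boolean -> cached flag -> power-supply property, in miniature.
 * demo_chip, demo_parse and demo_get_prop are made-up names.
 */
#include <linux/of.h>
#include <linux/power_supply.h>

struct demo_chip {
	bool fcc_stepper_mode;
};

static void demo_parse(struct demo_chip *chip, struct device_node *node)
{
	/* of_property_read_bool() is true iff the property is present */
	chip->fcc_stepper_mode =
		of_property_read_bool(node, "qcom,fcc-stepping-enable");
}

static int demo_get_prop(struct demo_chip *chip,
			 enum power_supply_property psp,
			 union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE:
		val->intval = chip->fcc_stepper_mode;
		return 0;
	default:
		return -EINVAL;
	}
}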
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 6d3316b934de..e96523a4d43e 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -670,6 +670,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
+ vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, false, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -2012,6 +2013,18 @@ static int smblib_dm_pulse(struct smb_charger *chg)
return rc;
}
+static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, val, val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
int smblib_dp_dm(struct smb_charger *chg, int val)
{
int target_icl_ua, rc = 0;
@@ -2063,6 +2076,21 @@ int smblib_dp_dm(struct smb_charger *chg, int val)
smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
target_icl_ua, chg->usb_icl_delta_ua);
break;
+ case POWER_SUPPLY_DP_DM_FORCE_5V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_5V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 5V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_9V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 9V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_12V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 12V\n");
+ break;
case POWER_SUPPLY_DP_DM_ICL_UP:
default:
break;
@@ -2210,6 +2238,7 @@ int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
{
switch (chg->real_charger_type) {
case POWER_SUPPLY_TYPE_USB_HVDCP:
+ case POWER_SUPPLY_TYPE_USB_HVDCP_3:
case POWER_SUPPLY_TYPE_USB_PD:
if (chg->smb_version == PM660_SUBTYPE)
val->intval = MICRO_9V;
@@ -2634,19 +2663,21 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
return -EINVAL;
}
- if (power_role == UFP_EN_CMD_BIT) {
- /* disable PBS workaround when forcing sink mode */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
- if (rc < 0) {
- smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
- rc);
- }
- } else {
- /* restore it back to 0xA5 */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
- if (rc < 0) {
- smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
- rc);
+ if (chg->wa_flags & TYPEC_PBS_WA_BIT) {
+ if (power_role == UFP_EN_CMD_BIT) {
+ /* disable PBS workaround when forcing sink mode */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ } else {
+ /* restore it back to 0xA5 */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
}
}
@@ -3329,7 +3360,8 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
rc = smblib_request_dpdm(chg, true);
if (rc < 0)
smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
-
+ if (chg->fcc_stepper_mode)
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
/* Schedule work to enable parallel charger */
vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
schedule_delayed_work(&chg->pl_enable_work,
@@ -3348,6 +3380,11 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
}
}
+ /* Force 1500mA FCC on removal */
+ if (chg->fcc_stepper_mode)
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+ true, 1500000);
+
rc = smblib_request_dpdm(chg, false);
if (rc < 0)
smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
@@ -3426,6 +3463,33 @@ static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg,
rising ? "rising" : "falling");
}
+#define MICRO_10P3V 10300000
+static void smblib_check_ov_condition(struct smb_charger *chg)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ if (chg->wa_flags & OV_IRQ_WA_BIT) {
+ rc = power_supply_get_property(chg->usb_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get current voltage, rc=%d\n",
+ rc);
+ return;
+ }
+
+ if (pval.intval > MICRO_10P3V) {
+ smblib_err(chg, "USBIN OV detected\n");
+ vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, true,
+ 0);
+ pval.intval = POWER_SUPPLY_DP_DM_FORCE_5V;
+ rc = power_supply_set_property(chg->batt_psy,
+ POWER_SUPPLY_PROP_DP_DM, &pval);
+ return;
+ }
+ }
+}
+
#define QC3_PULSES_FOR_6V 5
#define QC3_PULSES_FOR_9V 20
#define QC3_PULSES_FOR_12V 35
@@ -3435,6 +3499,7 @@ static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
u8 stat;
int pulses;
+ smblib_check_ov_condition(chg);
power_supply_changed(chg->usb_main_psy);
if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) {
rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
@@ -3557,7 +3622,8 @@ static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
* if pd is not allowed, then set pd_active = false right here,
* so that it starts the hvdcp engine
*/
- if (!get_effective_result(chg->pd_allowed_votable))
+ if (!get_effective_result(chg->pd_allowed_votable) &&
+ !chg->micro_usb_mode)
__smblib_set_prop_pd_active(chg, 0);
}
@@ -3967,6 +4033,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
/* reset hvdcp voters */
vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, true, 0);
+ vote(chg->hvdcp_hw_inov_dis_votable, OV_VOTER, false, 0);
/* reset power delivery voters */
vote(chg->pd_allowed_votable, PD_VOTER, false, 0);
@@ -4041,10 +4108,13 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
if (rc < 0)
smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", rc);
- /* restore crude sensor */
- rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
- if (rc < 0)
- smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
+ /* restore crude sensor if PM660/PMI8998 */
+ if (chg->wa_flags & TYPEC_PBS_WA_BIT) {
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't restore crude sensor rc=%d\n",
+ rc);
+ }
mutex_lock(&chg->vconn_oc_lock);
if (!chg->vconn_en)
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index f292ca09f532..4475ccc21a2a 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -66,6 +66,8 @@ enum print_reason {
#define USBIN_I_VOTER "USBIN_I_VOTER"
#define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER"
#define WBC_VOTER "WBC_VOTER"
+#define OV_VOTER "OV_VOTER"
+#define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -84,6 +86,8 @@ enum {
TYPEC_CC2_REMOVAL_WA_BIT = BIT(2),
QC_AUTH_INTERRUPT_WA_BIT = BIT(3),
OTG_WA = BIT(4),
+ OV_IRQ_WA_BIT = BIT(5),
+ TYPEC_PBS_WA_BIT = BIT(6),
};
enum smb_irq_index {
@@ -343,6 +347,7 @@ struct smb_charger {
u8 float_cfg;
bool use_extcon;
bool otg_present;
+ bool fcc_stepper_mode;
/* workaround flag */
u32 wa_flags;
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 84419af16f77..fd12ccc11e26 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
power_zone->id = result;
idr_init(&power_zone->idr);
+ result = -ENOMEM;
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
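The one-line powercap fix above pre-loads result with -ENOMEM before the allocations that follow, so that any jump into the shared error path reports an allocation failure instead of the non-negative id the earlier idr setup left in result. A reduced sketch of the idiom, built around a hypothetical demo_register():

/*
 * Pre-load the error code before fallible steps so every goto err_* path
 * returns a real error.  demo_register() and its label are made up.
 */
#include <linux/slab.h>
#include <linux/string.h>

static int demo_register(const char *name, char **out)
{
	char *copy;
	int result;

	/* earlier steps may leave a non-negative id in result */
	result = 0;

	/* the fix: from here on, the shared error path means -ENOMEM */
	result = -ENOMEM;
	copy = kstrdup(name, GFP_KERNEL);
	if (!copy)
		goto err_name_alloc;

	*out = copy;
	return 0;

err_name_alloc:
	/* without the assignment above this could return the stale id */
	return result;
}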
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 2e481b9e8ea5..60a5e0c63a13 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -97,30 +97,26 @@ static s32 scaled_ppm_to_ppb(long ppm)
/* posix clock implementation */
-static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
+static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
tp->tv_sec = 0;
tp->tv_nsec = 1;
return 0;
}
-static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
+static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
- struct timespec64 ts = timespec_to_timespec64(*tp);
- return ptp->info->settime64(ptp->info, &ts);
+ return ptp->info->settime64(ptp->info, tp);
}
-static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp)
+static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
- struct timespec64 ts;
int err;
- err = ptp->info->gettime64(ptp->info, &ts);
- if (!err)
- *tp = timespec64_to_timespec(ts);
+ err = ptp->info->gettime64(ptp->info, tp);
return err;
}
@@ -133,7 +129,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
- struct timespec ts;
+ struct timespec64 ts;
ktime_t kt;
s64 delta;
@@ -146,7 +142,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- kt = timespec_to_ktime(ts);
+ kt = timespec64_to_ktime(ts);
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index d4de0607b502..3039fb762893 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -69,6 +69,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
unsigned long long c;
unsigned long rate, hz;
+ unsigned long long ns100 = NSEC_PER_SEC;
u32 val = 0;
int err;
@@ -87,9 +88,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* cycles at the PWM clock rate will take period_ns nanoseconds.
*/
rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
- hz = NSEC_PER_SEC / period_ns;
- rate = (rate + (hz / 2)) / hz;
+ /* Consider precision in PWM_SCALE_WIDTH rate calculation */
+ ns100 *= 100;
+ hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns);
+ rate = DIV_ROUND_CLOSEST(rate * 100, hz);
/*
* Since the actual PWM divider is the register's frequency divider
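The Tegra PWM hunk above keeps two extra decimal digits of the target frequency: instead of truncating NSEC_PER_SEC / period_ns to whole hertz and then dividing, it scales the numerator by 100 and uses rounding division at both stages, so periods that do not divide one second evenly lose much less precision. A userspace sketch of the arithmetic with invented numbers:

/* Precision comparison only; period and rate values are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long long period_ns = 45334;	/* requested PWM period */
	unsigned long long rate = 1593750;	/* clk rate >> PWM_DUTY_WIDTH */

	/* old: hz truncated to whole hertz (true value is about 22058.5 Hz) */
	unsigned long long hz_old = 1000000000ULL / period_ns;
	unsigned long long div_old = (rate + hz_old / 2) / hz_old;

	/* new: work in units of 0.01 Hz and round instead of truncating */
	unsigned long long hz_new =
		(100 * 1000000000ULL + period_ns / 2) / period_ns;
	unsigned long long div_new = (rate * 100 + hz_new / 2) / hz_new;

	printf("old divider %llu, new divider %llu\n", div_old, div_new);
	return 0;
}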
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3a6d0290c54c..c5e272ea4372 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,6 +296,11 @@ static int anatop_regulator_probe(struct platform_device *pdev)
if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
sreg->sel = 22;
+ /* set the default voltage of the pcie phy to be 1.100v */
+ if (!sreg->sel && rdesc->name &&
+ !strcmp(rdesc->name, "vddpcie"))
+ sreg->sel = 0x10;
+
if (!sreg->bypass && !sreg->sel) {
dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
return -EINVAL;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 4d6d63e6d887..f7a18611b5d2 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -78,9 +78,10 @@ struct cprh_kbss_fuses {
* Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1.
* Fuse combos 16 - 23 map to CPR fusing revision 0 - 7 with speed bin fuse = 2.
* Fuse combos 24 - 31 map to CPR fusing revision 0 - 7 with speed bin fuse = 3.
+ * Fuse combos 32 - 39 map to CPR fusing revision 0 - 7 with speed bin fuse = 4.
*/
#define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT 32
-#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT 32
+#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT 40
#define CPRH_SDM630_KBSS_FUSE_COMBO_COUNT 24
/*
diff --git a/drivers/regulator/kryo-regulator.c b/drivers/regulator/kryo-regulator.c
index fd853e7323bb..d403ee43d924 100644
--- a/drivers/regulator/kryo-regulator.c
+++ b/drivers/regulator/kryo-regulator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -311,7 +311,6 @@ done:
static int kryo_regulator_disable(struct regulator_dev *rdev)
{
struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
- int rc;
unsigned long flags;
if (kvreg->vreg_en == false)
@@ -322,7 +321,7 @@ static int kryo_regulator_disable(struct regulator_dev *rdev)
kvreg_debug(kvreg, "disabled\n");
spin_unlock_irqrestore(&kvreg->slock, flags);
- return rc;
+ return 0;
}
static int kryo_regulator_is_enabled(struct regulator_dev *rdev)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 50ca3c33f942..d4ab6c4ced25 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1636,5 +1636,13 @@ config RTC_DRV_HID_SENSOR_TIME
If this driver is compiled as a module, it will be named
rtc-hid-sensor-time.
+config RTC_DRV_GOLDFISH
+ tristate "Goldfish Real Time Clock"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ help
+ Say yes to enable RTC driver for the Goldfish based virtual platform.
+
+ Goldfish is a code name for the virtual platform developed by Google
+ for Android emulation.
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index f264c343e6e9..138a6436d075 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -162,3 +162,4 @@ obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
obj-$(CONFIG_RTC_DRV_XGENE) += rtc-xgene.o
obj-$(CONFIG_RTC_DRV_ZYNQMP) += rtc-zynqmp.o
+obj-$(CONFIG_RTC_DRV_GOLDFISH) += rtc-goldfish.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 853976bd3d36..9473715725df 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -217,6 +217,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
missing = year;
}
+ /* Can't proceed if alarm is still invalid after replacing
+ * missing fields.
+ */
+ err = rtc_valid_tm(&alarm->time);
+ if (err)
+ goto done;
+
/* with luck, no rollover is needed */
t_now = rtc_tm_to_time64(&now);
t_alm = rtc_tm_to_time64(&alarm->time);
@@ -268,9 +275,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
dev_warn(&rtc->dev, "alarm rollover not handled\n");
}
-done:
err = rtc_valid_tm(&alarm->time);
+done:
if (err) {
dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 8f7034ba7d9e..86015b393dd5 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -41,6 +41,9 @@
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#ifdef CONFIG_X86
+#include <asm/i8259.h>
+#endif
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -1058,17 +1061,23 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
cmos_wake_setup(&pnp->dev);
- if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0))
+ if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
+ unsigned int irq = 0;
+#ifdef CONFIG_X86
/* Some machines contain a PNP entry for the RTC, but
* don't define the IRQ. It should always be safe to
- * hardcode it in these cases
+ * hardcode it on systems with a legacy PIC.
*/
+ if (nr_legacy_irqs())
+ irq = 8;
+#endif
return cmos_do_probe(&pnp->dev,
- pnp_get_resource(pnp, IORESOURCE_IO, 0), 8);
- else
+ pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+ } else {
return cmos_do_probe(&pnp->dev,
pnp_get_resource(pnp, IORESOURCE_IO, 0),
pnp_irq(pnp, 0));
+ }
}
static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 3b3049c8c9e0..c0eb113588ff 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -527,6 +527,10 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
if (get_user(new_margin, (int __user *)arg))
return -EFAULT;
+ /* the hardware's tick rate is 4096 Hz, so
+ * the counter value needs to be scaled accordingly
+ */
+ new_margin <<= 12;
if (new_margin < 1 || new_margin > 16777216)
return -EINVAL;
@@ -535,7 +539,8 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
ds1374_wdt_ping();
/* fallthrough */
case WDIOC_GETTIMEOUT:
- return put_user(wdt_margin, (int __user *)arg);
+ /* when returning ... inverse is true */
+ return put_user((wdt_margin >> 12), (int __user *)arg);
case WDIOC_SETOPTIONS:
if (copy_from_user(&options, (int __user *)arg, sizeof(int)))
return -EFAULT;
@@ -543,14 +548,15 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
if (options & WDIOS_DISABLECARD) {
pr_info("disable watchdog\n");
ds1374_wdt_disable();
+ return 0;
}
if (options & WDIOS_ENABLECARD) {
pr_info("enable watchdog\n");
ds1374_wdt_settimeout(wdt_margin);
ds1374_wdt_ping();
+ return 0;
}
-
return -EINVAL;
}
return -ENOTTY;
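The DS1374 change above makes the watchdog ioctls agree with the hardware tick rate: the counter runs at 4096 Hz, so a timeout supplied in seconds is shifted left by 12 (multiplied by 4096) before being range-checked and programmed, and shifted right by 12 when reported back through WDIOC_GETTIMEOUT; 60 seconds becomes 245760 ticks and reads back as 60. A two-line sketch of the conversion, with hypothetical helper names:

/* 4096 Hz tick conversion; the demo_* helpers are made up for illustration. */
static inline int demo_seconds_to_ticks(int seconds)
{
	return seconds << 12;		/* 60 s -> 245760 ticks */
}

static inline int demo_ticks_to_seconds(int ticks)
{
	return ticks >> 12;		/* 245760 ticks -> 60 s */
}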
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
new file mode 100644
index 000000000000..d67769265185
--- /dev/null
+++ b/drivers/rtc/rtc-goldfish.c
@@ -0,0 +1,237 @@
+/* drivers/rtc/rtc-goldfish.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2017 Imagination Technologies Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+
+#define TIMER_TIME_LOW 0x00 /* get low bits of current time */
+ /* and update TIMER_TIME_HIGH */
+#define TIMER_TIME_HIGH 0x04 /* get high bits of time at last */
+ /* TIMER_TIME_LOW read */
+#define TIMER_ALARM_LOW 0x08 /* set low bits of alarm and */
+ /* activate it */
+#define TIMER_ALARM_HIGH 0x0c /* set high bits of next alarm */
+#define TIMER_IRQ_ENABLED 0x10
+#define TIMER_CLEAR_ALARM 0x14
+#define TIMER_ALARM_STATUS 0x18
+#define TIMER_CLEAR_INTERRUPT 0x1c
+
+struct goldfish_rtc {
+ void __iomem *base;
+ int irq;
+ struct rtc_device *rtc;
+};
+
+static int goldfish_rtc_read_alarm(struct device *dev,
+ struct rtc_wkalrm *alrm)
+{
+ u64 rtc_alarm;
+ u64 rtc_alarm_low;
+ u64 rtc_alarm_high;
+ void __iomem *base;
+ struct goldfish_rtc *rtcdrv;
+
+ rtcdrv = dev_get_drvdata(dev);
+ base = rtcdrv->base;
+
+ rtc_alarm_low = readl(base + TIMER_ALARM_LOW);
+ rtc_alarm_high = readl(base + TIMER_ALARM_HIGH);
+ rtc_alarm = (rtc_alarm_high << 32) | rtc_alarm_low;
+
+ do_div(rtc_alarm, NSEC_PER_SEC);
+ memset(alrm, 0, sizeof(struct rtc_wkalrm));
+
+ rtc_time_to_tm(rtc_alarm, &alrm->time);
+
+ if (readl(base + TIMER_ALARM_STATUS))
+ alrm->enabled = 1;
+ else
+ alrm->enabled = 0;
+
+ return 0;
+}
+
+static int goldfish_rtc_set_alarm(struct device *dev,
+ struct rtc_wkalrm *alrm)
+{
+ struct goldfish_rtc *rtcdrv;
+ unsigned long rtc_alarm;
+ u64 rtc_alarm64;
+ u64 rtc_status_reg;
+ void __iomem *base;
+ int ret = 0;
+
+ rtcdrv = dev_get_drvdata(dev);
+ base = rtcdrv->base;
+
+ if (alrm->enabled) {
+ ret = rtc_tm_to_time(&alrm->time, &rtc_alarm);
+ if (ret != 0)
+ return ret;
+
+ rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
+ writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
+ writel(rtc_alarm64, base + TIMER_ALARM_LOW);
+ } else {
+ /*
+ * if this function was called with enabled=0
+ * then it could mean that the application is
+ * trying to cancel an ongoing alarm
+ */
+ rtc_status_reg = readl(base + TIMER_ALARM_STATUS);
+ if (rtc_status_reg)
+ writel(1, base + TIMER_CLEAR_ALARM);
+ }
+
+ return ret;
+}
+
+static int goldfish_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ void __iomem *base;
+ struct goldfish_rtc *rtcdrv;
+
+ rtcdrv = dev_get_drvdata(dev);
+ base = rtcdrv->base;
+
+ if (enabled)
+ writel(1, base + TIMER_IRQ_ENABLED);
+ else
+ writel(0, base + TIMER_IRQ_ENABLED);
+
+ return 0;
+}
+
+static irqreturn_t goldfish_rtc_interrupt(int irq, void *dev_id)
+{
+ struct goldfish_rtc *rtcdrv = dev_id;
+ void __iomem *base = rtcdrv->base;
+
+ writel(1, base + TIMER_CLEAR_INTERRUPT);
+
+ rtc_update_irq(rtcdrv->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static int goldfish_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct goldfish_rtc *rtcdrv;
+ void __iomem *base;
+ u64 time_high;
+ u64 time_low;
+ u64 time;
+
+ rtcdrv = dev_get_drvdata(dev);
+ base = rtcdrv->base;
+
+ time_low = readl(base + TIMER_TIME_LOW);
+ time_high = readl(base + TIMER_TIME_HIGH);
+ time = (time_high << 32) | time_low;
+
+ do_div(time, NSEC_PER_SEC);
+
+ rtc_time_to_tm(time, tm);
+
+ return 0;
+}
+
+static int goldfish_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct goldfish_rtc *rtcdrv;
+ void __iomem *base;
+ unsigned long now;
+ u64 now64;
+ int ret;
+
+ rtcdrv = dev_get_drvdata(dev);
+ base = rtcdrv->base;
+
+ ret = rtc_tm_to_time(tm, &now);
+ if (ret == 0) {
+ now64 = now * NSEC_PER_SEC;
+ writel((now64 >> 32), base + TIMER_TIME_HIGH);
+ writel(now64, base + TIMER_TIME_LOW);
+ }
+
+ return ret;
+}
+
+static const struct rtc_class_ops goldfish_rtc_ops = {
+ .read_time = goldfish_rtc_read_time,
+ .set_time = goldfish_rtc_set_time,
+ .read_alarm = goldfish_rtc_read_alarm,
+ .set_alarm = goldfish_rtc_set_alarm,
+ .alarm_irq_enable = goldfish_rtc_alarm_irq_enable
+};
+
+static int goldfish_rtc_probe(struct platform_device *pdev)
+{
+ struct goldfish_rtc *rtcdrv;
+ struct resource *r;
+ int err;
+
+ rtcdrv = devm_kzalloc(&pdev->dev, sizeof(*rtcdrv), GFP_KERNEL);
+ if (!rtcdrv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtcdrv);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ rtcdrv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(rtcdrv->base))
+ return -ENODEV;
+
+ rtcdrv->irq = platform_get_irq(pdev, 0);
+ if (rtcdrv->irq < 0)
+ return -ENODEV;
+
+ rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &goldfish_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtcdrv->rtc))
+ return PTR_ERR(rtcdrv->rtc);
+
+ err = devm_request_irq(&pdev->dev, rtcdrv->irq,
+ goldfish_rtc_interrupt,
+ 0, pdev->name, rtcdrv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct of_device_id goldfish_rtc_of_match[] = {
+ { .compatible = "google,goldfish-rtc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_rtc_of_match);
+
+static struct platform_driver goldfish_rtc = {
+ .probe = goldfish_rtc_probe,
+ .driver = {
+ .name = "goldfish_rtc",
+ .of_match_table = goldfish_rtc_of_match,
+ }
+};
+
+module_platform_driver(goldfish_rtc);
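The new Goldfish driver above talks to a 64-bit nanosecond counter through two 32-bit registers, where a read of TIMER_TIME_LOW latches the matching high word into TIMER_TIME_HIGH; the alarm and time-set paths write the high word first. A compressed sketch of the read side under that assumption, with a hypothetical helper name:

/*
 * Reading LOW latches HIGH, so the two readl() calls below return one
 * consistent 64-bit nanosecond value.  demo_read_ns() is not driver code.
 */
#include <linux/io.h>
#include <linux/types.h>

static u64 demo_read_ns(void __iomem *base)
{
	u64 lo = readl(base + 0x00);	/* TIMER_TIME_LOW */
	u64 hi = readl(base + 0x04);	/* TIMER_TIME_HIGH, latched by the read above */

	return (hi << 32) | lo;
}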
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index df39ce02a99d..c6b0c7ed7a30 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
{
long rc = OPAL_BUSY;
+ int retries = 10;
u32 y_m_d;
u64 h_m_s_ms;
__be32 __y_m_d;
@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
if (rc == OPAL_BUSY_EVENT)
opal_poll_events(NULL);
- else
+ else if (retries-- && (rc == OPAL_HARDWARE
+ || rc == OPAL_INTERNAL_ERROR))
msleep(10);
+ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
+ break;
}
if (rc != OPAL_SUCCESS)
@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
{
long rc = OPAL_BUSY;
+ int retries = 10;
u32 y_m_d = 0;
u64 h_m_s_ms = 0;
@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
rc = opal_rtc_write(y_m_d, h_m_s_ms);
if (rc == OPAL_BUSY_EVENT)
opal_poll_events(NULL);
- else
+ else if (retries-- && (rc == OPAL_HARDWARE
+ || rc == OPAL_INTERNAL_ERROR))
msleep(10);
+ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
+ break;
}
return rc == OPAL_SUCCESS ? 0 : -EIO;
@@ -142,6 +150,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+
+ /* check if no alarm is set */
+ if (y_m_d == 0 && h_m_s_ms == 0) {
+ pr_debug("No alarm is set\n");
+ rc = -ENOENT;
+ goto exit;
+ } else {
+ pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
+ }
+
opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
exit:
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 950c5d0b6dca..afab89f5be48 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -257,7 +257,7 @@ static int snvs_rtc_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "offset", &data->offset);
}
- if (!data->regmap) {
+ if (IS_ERR(data->regmap)) {
dev_err(&pdev->dev, "Can't find snvs syscon\n");
return -ENODEV;
}
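The snvs-rtc fix above matters because the syscon/regmap lookup helpers report failure with an ERR_PTR() value rather than NULL, so the old !data->regmap test could never fire. A minimal sketch of the correct check, with a hypothetical demo_probe() and property name:

/*
 * regmap/syscon lookups return ERR_PTR() on failure, never NULL.
 * demo_probe() and "demo,syscon" are made up for illustration.
 */
#include <linux/err.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int demo_probe(struct device_node *np)
{
	struct regmap *map;

	map = syscon_regmap_lookup_by_phandle(np, "demo,syscon");
	if (IS_ERR(map))		/* not: if (!map) */
		return PTR_ERR(map);

	return 0;
}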
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e7a6f1222642..b76a85d14ef0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1881,8 +1881,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device,
{
int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
- if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
- /* dasd is being set offline. */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * dasd is being set offline
+ * but it is no safe offline where we have to allow I/O
+ */
return 1;
}
if (device->stopped) {
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index d26134713682..d05c553eb552 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2743,6 +2743,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
+
+ /*
+ * For path verification work we need to stick with the path that was
+ * originally chosen so that the per path configuration data is
+ * assigned correctly.
+ */
+ if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+ erp->lpm = cqr->lpm;
+ }
+
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 9083247f55a8..21d174e9ebdb 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -518,10 +518,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
- if (startpriv->uid.type != UA_BASE_DEVICE) {
+ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
pfxdata->validity.verify_base = 1;
- if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
- pfxdata->validity.hyper_pav = 1;
+
+ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+ pfxdata->validity.verify_base = 1;
+ pfxdata->validity.hyper_pav = 1;
}
/* define extend data (mostly)*/
@@ -3002,10 +3004,12 @@ static int prepare_itcw(struct itcw *itcw,
pfxdata.validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
- if (startpriv->uid.type != UA_BASE_DEVICE) {
+ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+ pfxdata.validity.verify_base = 1;
+
+ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
pfxdata.validity.verify_base = 1;
- if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
- pfxdata.validity.hyper_pav = 1;
+ pfxdata.validity.hyper_pav = 1;
}
switch (cmd) {
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 6fa9364d1c07..835f1054976b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
# S/390 character devices
#
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1e16331891a9..f9d6a9f00640 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4bb5262f7aee..742ca57ece8c 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
- int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
+ int rc, tmp_count = count, tmp_start = start, nr = q->nr;
unsigned int ccq = 0;
qperf_inc(q, eqbs);
@@ -149,14 +149,7 @@ again:
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
- /*
- * Retry once, if that fails bail out and process the
- * extracted buffers before trying again.
- */
- if (!retried++)
- goto again;
- else
- return count - tmp_count;
+ return count - tmp_count;
}
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
@@ -212,7 +205,10 @@ again:
return 0;
}
-/* returns number of examined buffers and their common state in *state */
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
int auto_ack, int merge_pending)
@@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
- for (i = 0; i < count; i++) {
- if (!__state) {
- __state = q->slsb.val[bufnr];
- if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
- __state = SLSB_P_OUTPUT_EMPTY;
- } else if (merge_pending) {
- if ((q->slsb.val[bufnr] & __state) != __state)
- break;
- } else if (q->slsb.val[bufnr] != __state)
- break;
+ /* get initial state: */
+ __state = q->slsb.val[bufnr];
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+
+ for (i = 1; i < count; i++) {
bufnr = next_buf(bufnr);
+
+ /* merge PENDING into EMPTY: */
+ if (merge_pending &&
+ q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+ __state == SLSB_P_OUTPUT_EMPTY)
+ continue;
+
+ /* stop if next state differs from initial state: */
+ if (q->slsb.val[bufnr] != __state)
+ break;
}
*state = __state;
return i;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866a9ded..35286907c636 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
int i;
for (i = 0; i < nr_queues; i++) {
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
{
struct ciw *ciw;
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
- int rc;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->equeue = *ciw;
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->aqueue = *ciw;
@@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->orig_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_int_handler;
return 0;
-out_err:
- qdio_release_memory(irq_ptr);
- return rc;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 5006cb6ce62d..50030cdf91fb 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -591,6 +591,11 @@ struct qeth_cmd_buffer {
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};
+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+ return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
/**
* definition of a qeth channel, used for read and write
*/
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e5b9506698b1..95c631125a20 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -517,8 +517,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
-
-static int qeth_issue_next_read(struct qeth_card *card)
+static int __qeth_issue_next_read(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -549,6 +548,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
return rc;
}
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+ int ret;
+
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ ret = __qeth_issue_next_read(card);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+ return ret;
+}
+
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
@@ -952,7 +962,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- wake_up(&card->wait_q);
+ wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
@@ -1156,6 +1166,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
rc = qeth_get_problem(cdev, irb);
if (rc) {
+ card->read_or_write_problem = 1;
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1174,7 +1185,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
- qeth_issue_next_read(card);
+ __qeth_issue_next_read(card);
iob = channel->iob;
index = channel->buf_no;
@@ -2054,7 +2065,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
struct qeth_reply *reply = NULL;
unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = NULL;
QETH_CARD_TEXT(card, 2, "sendctl");
@@ -2068,23 +2079,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply->callback = reply_cb;
reply->param = reply_param;
- if (card->state == CARD_STATE_DOWN)
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
- else
- reply->seqno = card->seqno.ipa++;
+
init_waitqueue_head(&reply->wait_q);
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list, &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
- qeth_prepare_control_data(card, len, iob);
- if (IS_IPA(iob->data))
+ if (IS_IPA(iob->data)) {
+ cmd = __ipa_cmd(iob);
+ cmd->hdr.seqno = card->seqno.ipa++;
+ reply->seqno = cmd->hdr.seqno;
event_timeout = QETH_IPA_TIMEOUT;
- else
+ } else {
+ reply->seqno = QETH_IDX_COMMAND_SEQNO;
event_timeout = QETH_TIMEOUT;
+ }
+ qeth_prepare_control_data(card, len, iob);
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_add_tail(&reply->list, &card->cmd_waiter_list);
+ spin_unlock_irqrestore(&card->lock, flags);
+
timeout = jiffies + event_timeout;
QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2109,9 +2124,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
/* we have only one long running ipassist, since we can ensure
process context of this command we can sleep */
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- if ((cmd->hdr.command == IPA_CMD_SETIP) &&
- (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
+ if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
+ cmd->hdr.prot_version == QETH_PROT_IPV4) {
if (!wait_event_timeout(reply->wait_q,
atomic_read(&reply->received), event_timeout))
goto time_err;
@@ -2877,7 +2891,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
cmd->hdr.command = command;
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
- cmd->hdr.seqno = card->seqno.ipa;
+ /* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
if (card->options.layer2)
@@ -4966,8 +4980,6 @@ static void qeth_core_free_card(struct qeth_card *card)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
- if (card->dev)
- free_netdev(card->dev);
kfree(card->ip_tbd_list);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 58bcb3c9a86a..acdb5ccb0ab9 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1062,8 +1062,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0d6888cbd96e..bbdb3b6c54bb 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3243,8 +3243,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 34367d172961..4534a7ce77b8 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -287,6 +287,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port, struct scsi_device *sdev,
+ u8 want, u8 need)
+{
+ unsigned long flags;
+
+ read_lock_irqsave(&adapter->erp_lock, flags);
+ zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+ read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
/**
* zfcp_dbf_rec_run_lvl - trace event related to running recovery
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 21c8c689b02b..7a7984a50683 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_EXT_H
@@ -34,6 +34,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev, u8 want, u8 need);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a9b8104b982e..bb99db2948ab 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -616,9 +616,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
- zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
@@ -640,9 +640,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
struct fc_rport *rport = port->rport;
if (rport) {
- zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83e2ecd..24e57e770432 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -6291,18 +6291,17 @@ static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
static uchar
AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
{
- EXT_MSG sdtr_buf;
- uchar sdtr_period_index;
- PortAddr iop_base;
-
- iop_base = asc_dvc->iop_base;
- sdtr_buf.msg_type = EXTENDED_MESSAGE;
- sdtr_buf.msg_len = MS_SDTR_LEN;
- sdtr_buf.msg_req = EXTENDED_SDTR;
- sdtr_buf.xfer_period = sdtr_period;
+ PortAddr iop_base = asc_dvc->iop_base;
+ uchar sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+ EXT_MSG sdtr_buf = {
+ .msg_type = EXTENDED_MESSAGE,
+ .msg_len = MS_SDTR_LEN,
+ .msg_req = EXTENDED_SDTR,
+ .xfer_period = sdtr_period,
+ .req_ack_offset = sdtr_offset,
+ };
sdtr_offset &= ASC_SYN_MAX_OFFSET;
- sdtr_buf.req_ack_offset = sdtr_offset;
- sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+
if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
(uchar *)&sdtr_buf,
@@ -11030,6 +11029,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
ASC_DBG(2, "AdvInitGetConfig()\n");
ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#else
+ share_irq = 0;
+ ret = -ENODEV;
#endif /* CONFIG_PCI */
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 499e369eabf0..8bc1625337f6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
struct bnx2fc_cmd_mgr *cmd_mgr;
spinlock_t hba_lock;
struct mutex hba_mutex;
+ struct mutex hba_stats_mutex;
unsigned long adapter_state;
#define ADAPTER_STATE_UP 0
#define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 67405c628864..d0b227ffbd5f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -641,15 +641,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
if (!fw_stats)
return NULL;
+ mutex_lock(&hba->hba_stats_mutex);
+
bnx2fc_stats = fc_get_host_stats(shost);
init_completion(&hba->stat_req_done);
if (bnx2fc_send_stat_req(hba))
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
if (!rc) {
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
}
BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -671,6 +673,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
memcpy(&hba->prev_stats, hba->stats_buffer,
sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+ mutex_unlock(&hba->hba_stats_mutex);
return bnx2fc_stats;
}
@@ -1302,6 +1307,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
}
spin_lock_init(&hba->hba_lock);
mutex_init(&hba->hba_mutex);
+ mutex_init(&hba->hba_stats_mutex);
hba->cnic = cnic;
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 622bdabc8894..dab195f04da7 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
goto bye;
}
- mempool_free(mbp, hw->mb_mempool);
if (finicsum != cfcsum) {
csio_warn(hw,
"Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
rv = csio_hw_validate_caps(hw, mbp);
if (rv != 0)
goto bye;
+
+ mempool_free(mbp, hw->mb_mempool);
+ mbp = NULL;
+
/*
* Note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index d4cda5e9600e..21c8d210c456 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -180,11 +180,14 @@ static u8 adpt_read_blink_led(adpt_hba* host)
*============================================================================
*/
+#ifdef MODULE
static struct pci_device_id dptids[] = {
{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, }
};
+#endif
+
MODULE_DEVICE_TABLE(pci,dptids);
static int adpt_detect(struct scsi_host_template* sht)
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index eefe14d453db..b87ab38a4530 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1768,7 +1768,7 @@ struct scsi_host_template fdomain_driver_template = {
};
#ifndef PCMCIA
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && defined(MODULE)
static struct pci_device_id fdomain_pci_tbl[] = {
{ PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index f8d2478b11cc..87e081f8a386 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -538,7 +538,10 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
return -1;
}
- while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY);
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ {
+ /* FIXME - no timeout */
+ }
#ifndef SCSI_G_NCR5380_MEM
{
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 8fae03215a85..543c10266984 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -366,7 +366,7 @@ enum ibmvfc_fcp_rsp_info_codes {
};
struct ibmvfc_fcp_rsp_info {
- __be16 reserved;
+ u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
}__attribute__((packed, aligned (2)));
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 6a926bae76b2..7a91cf3ff173 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -110,11 +110,6 @@
#define i91u_MAXQUEUE 2
#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a"
-#define I950_DEVICE_ID 0x9500 /* Initio's inic-950 product ID */
-#define I940_DEVICE_ID 0x9400 /* Initio's inic-940 product ID */
-#define I935_DEVICE_ID 0x9401 /* Initio's inic-935 product ID */
-#define I920_DEVICE_ID 0x0002 /* Initio's other product ID */
-
#ifdef DEBUG_i91u
static unsigned int i91u_debug = DEBUG_DEFAULT;
#endif
@@ -127,17 +122,6 @@ static int setup_debug = 0;
static void i91uSCBPost(u8 * pHcb, u8 * pScb);
-/* PCI Devices supported by this driver */
-static struct pci_device_id i91u_pci_devices[] = {
- { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_INIT, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { }
-};
-MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
-
#define DEBUG_INTERRUPT 0
#define DEBUG_QUEUE 0
#define DEBUG_STATE 0
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 7a58128a0000..2f61d8cd5882 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -835,8 +835,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -5864,8 +5866,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -6255,8 +6259,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -6282,8 +6288,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
} else {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index efce04df2109..9f0b00c38658 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * cmds should fail during shutdown, if the session
+ * state is bad, allowing completion to happen
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1980,6 +1989,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, also bad in this case.
+ * Instead, handle cmd, allow completion to happen and let
+ * upper layer to deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2083,7 +2105,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e10d98..12886f96b286 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 519dac4e341e..9a8c2f97ed70 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -222,6 +222,7 @@ out_done:
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task = TO_SAS_TASK(cmd);
/* At this point, we only get called following an actual abort
@@ -230,6 +231,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
*/
sas_end_task(cmd, task);
+ if (dev_is_sata(dev)) {
+ /* defer commands to libata so that libata EH can
+ * handle ata qcs correctly
+ */
+ list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
+ return;
+ }
+
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
* error handler pending list.
@@ -237,22 +246,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
-{
- struct domain_device *dev = cmd_to_domain_dev(cmd);
- struct sas_ha_struct *ha = dev->port->ha;
- struct sas_task *task = TO_SAS_TASK(cmd);
-
- if (!dev_is_sata(dev)) {
- sas_eh_finish_cmd(cmd);
- return;
- }
-
- /* report the timeout to libata */
- sas_end_task(cmd, task);
- list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
-}
-
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
@@ -260,7 +253,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
}
}
@@ -622,12 +615,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -638,7 +631,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
"recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8379fbbc60db..ef43847153ea 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -13493,6 +13493,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
switch (wq->entry_size) {
default:
case 64:
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 14c0334f41e4..26c67c42985c 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -55,6 +55,7 @@ struct mac_esp_priv {
int error;
};
static struct esp *esp_chips[2];
+static DEFINE_SPINLOCK(esp_chips_lock);
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
platform_get_drvdata((struct platform_device *) \
@@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev)
}
host->irq = IRQ_MAC_SCSI;
- esp_chips[dev->id] = esp;
- mb();
- if (esp_chips[!dev->id] == NULL) {
- err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
- if (err < 0) {
- esp_chips[dev->id] = NULL;
- goto fail_free_priv;
- }
+
+ /* The request_irq() call is intended to succeed for the first device
+ * and fail for the second device.
+ */
+ err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
+ spin_lock(&esp_chips_lock);
+ if (err < 0 && esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
+ goto fail_free_priv;
}
+ esp_chips[dev->id] = esp;
+ spin_unlock(&esp_chips_lock);
err = scsi_esp_register(esp, &dev->dev);
if (err)
@@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev)
return 0;
fail_free_irq:
- if (esp_chips[!dev->id] == NULL)
+ spin_lock(&esp_chips_lock);
+ esp_chips[dev->id] = NULL;
+ if (esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
free_irq(host->irq, esp);
+ } else
+ spin_unlock(&esp_chips_lock);
fail_free_priv:
kfree(mep);
fail_free_command_block:
@@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev)
scsi_esp_unregister(esp);
+ spin_lock(&esp_chips_lock);
esp_chips[dev->id] = NULL;
- if (!(esp_chips[0] || esp_chips[1]))
+ if (esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
free_irq(irq, NULL);
+ } else
+ spin_unlock(&esp_chips_lock);
kfree(mep);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index e111c3d8c5d6..b868ef3b2ca3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3886,19 +3886,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -3924,6 +3911,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -3945,6 +3945,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -3975,6 +3976,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (mpi_request->DataLength) {
if (ioc->build_sg_scmd(ioc, scmd, smid)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 02360de6b7e0..39285070f3b5 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2629,7 +2629,7 @@ static void mvumi_shutdown(struct pci_dev *pdev)
mvumi_flush_cache(mhba);
}
-static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mvumi_hba *mhba = NULL;
@@ -2648,7 +2648,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int mvumi_resume(struct pci_dev *pdev)
+static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
{
int ret;
struct mvumi_hba *mhba = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e197c6f39de2..aa18c729d23a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -365,6 +365,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
srb_t *sp = (srb_t *)ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
+ del_timer(&sp->u.iocb_cmd.timer);
complete(&abt->u.abt.comp);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e6faa0b050d1..824e27eec7a1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5502,7 +5502,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
fc_port_t *fcport;
int rc;
- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed",
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 60720e5b1ebc..6b61b09b3226 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -180,7 +180,7 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
@@ -589,17 +589,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev,
int key)
{
struct scsi_dev_info_list *devinfo;
- int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
- err = PTR_ERR(devinfo);
- if (err != -ENOENT)
- return err;
-
- /* nothing found, return nothing */
+ /* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 4d655b568269..5711d58f9e81 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
+ {"IBM", "3542", "rdac", },
+ {"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
- {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
+ {"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ee65f3324d71..367dde6a6e17 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1420,6 +1420,85 @@ static int media_not_present(struct scsi_disk *sdkp,
return 0;
}
+/**
+ * sd_check_events - check media events
+ * @disk: kernel device descriptor
+ * @clearing: disk events currently being cleared
+ *
+ * Returns mask of DISK_EVENT_*.
+ *
+ * Note: this function is invoked from the block subsystem.
+ **/
+static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdp = sdkp->device;
+ struct scsi_sense_hdr *sshdr = NULL;
+ int retval;
+
+ SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
+
+ /* Simply return for embedded storage media such as UFS */
+ if (!sdp->removable)
+ goto out;
+
+ /*
+ * If the device is offline, don't send any commands - just pretend as
+ * if the command failed. If the device ever comes back online, we
+ * can deal with it then. It is only because of unrecoverable errors
+ * that we would ever take a device offline in the first place.
+ */
+ if (!scsi_device_online(sdp)) {
+ set_media_not_present(sdkp);
+ goto out;
+ }
+
+ /*
+ * Using TEST_UNIT_READY enables differentiation between drive with
+ * no cartridge loaded - NOT READY, drive with changed cartridge -
+ * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
+ *
+ * Drives that auto spin down, e.g. iomega jaz 1G, will be started
+ * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
+ * sd_revalidate() is called.
+ */
+ retval = -ENODEV;
+
+ if (scsi_block_when_processing_errors(sdp)) {
+ sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+ retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
+ sshdr);
+ }
+
+ /* failed to execute TUR, assume media not present */
+ if (host_byte(retval)) {
+ set_media_not_present(sdkp);
+ goto out;
+ }
+
+ if (media_not_present(sdkp, sshdr))
+ goto out;
+
+ /*
+ * For removable scsi disk we have to recognise the presence
+ * of a disk in the drive.
+ */
+ if (!sdkp->media_present)
+ sdp->changed = 1;
+ sdkp->media_present = 1;
+out:
+ /*
+ * sdp->changed is set under the following conditions:
+ *
+ * Medium present state has changed in either direction.
+ * Device has indicated UNIT_ATTENTION.
+ */
+ kfree(sshdr);
+ retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ sdp->changed = 0;
+ return retval;
+}
+
static int sd_sync_cache(struct scsi_disk *sdkp)
{
int retries, res;
@@ -1612,6 +1691,7 @@ static const struct block_device_operations sd_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
#endif
+ .check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
.pr_ops = &sd_pr_ops,
@@ -1873,6 +1953,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* standby */
if (sshdr.asc == 4 && sshdr.ascq == 0xc)
break; /* unavailable */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+ break; /* sanitize in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 044d06410d4c..01168acc864d 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -546,7 +546,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
ecomp = &edev->component[components++];
if (!IS_ERR(ecomp)) {
- ses_get_power_status(edev, ecomp);
if (addl_desc_ptr)
ses_process_descriptor(
ecomp,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3bc15d2664a1..3906be2836ba 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -535,6 +535,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
@@ -575,6 +576,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
}
err_out:
err2 = sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return err ? : err2 ? : count;
}
@@ -674,18 +676,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
- static char cmd[TASK_COMM_LEN];
- if (strcmp(current->comm, cmd)) {
- printk_ratelimited(KERN_WARNING
- "sg_write: data in/out %d/%d bytes "
- "for SCSI command 0x%x-- guessing "
- "data in;\n program %s not setting "
- "count and/or reply_len properly\n",
- old_hdr.reply_len - (int)SZ_SG_HEADER,
- input_size, (unsigned int) cmnd[0],
- current->comm);
- strcpy(cmd, current->comm);
- }
+ printk_ratelimited(KERN_WARNING
+ "sg_write: data in/out %d/%d bytes "
+ "for SCSI command 0x%x-- guessing "
+ "data in;\n program %s not setting "
+ "count and/or reply_len properly\n",
+ old_hdr.reply_len - (int)SZ_SG_HEADER,
+ input_size, (unsigned int) cmnd[0],
+ current->comm);
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
@@ -784,11 +782,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
+ if (hp->dxfer_len >= SZ_256M)
+ return -EINVAL;
+
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
@@ -801,6 +803,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return -ENODEV;
}
@@ -1293,6 +1296,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
@@ -1834,8 +1838,6 @@ sg_finish_rem_req(Sg_request *srp)
else
sg_remove_scat(sfp, req_schp);
- sg_remove_request(sfp, srp);
-
return ret;
}
@@ -1901,7 +1903,7 @@ retry:
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
- schp->pages[k] = alloc_pages(gfp_mask, order);
+ schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
if (!schp->pages[k])
goto out;
@@ -2072,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
- break;
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return NULL;
}
/* always adds to end of list */
@@ -2182,12 +2185,17 @@ sg_remove_sfp_usercontext(struct work_struct *work)
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
Sg_request *srp;
+ unsigned long iflags;
/* Cleanup any responses which were never read(). */
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
sg_finish_rem_req(srp);
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
}
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3b3b56f4a830..82ed99848378 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -176,8 +176,7 @@ static struct eisa_device_id sim710_eisa_ids[] = {
};
MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
-static __init int
-sim710_eisa_probe(struct device *dev)
+static int sim710_eisa_probe(struct device *dev)
{
struct eisa_device *edev = to_eisa_device(dev);
unsigned long io_addr = edev->base_addr;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5e4e1ba96f10..351d81dc2200 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -890,10 +890,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case TEST_UNIT_READY:
break;
default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_ERROR);
}
break;
case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 105d861a2325..aaddf2fff99c 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2596,7 +2596,7 @@ bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
int reg = 0;
- int offset, ret = 0, testbus_sel_offset = 19;
+ int offset = 0, ret = 0, testbus_sel_offset = 19;
u32 mask = TEST_BUS_SUB_SEL_MASK;
unsigned long flags;
struct ufs_hba *hba;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9bf7f57d9a17..6d43254d84b9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.c
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -635,6 +635,7 @@ static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
entry.str = str;
entry.lba = lba;
+ entry.cmd_id = cmd_id;
entry.transfer_len = transfer_len;
entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
entry.tag = tag;
@@ -8431,7 +8432,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (ufshcd_is_clkscaling_supported(hba)) {
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
- destroy_workqueue(hba->clk_scaling.workq);
+ if (hba->clk_scaling.workq)
+ destroy_workqueue(hba->clk_scaling.workq);
}
ufshcd_disable_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 03a2aadf0d3c..8ef905cbfc9c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -28,6 +28,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#define VIRTIO_SCSI_MEMPOOL_SZ 64
@@ -704,6 +705,28 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
+static int virtscsi_device_alloc(struct scsi_device *sdevice)
+{
+ /*
+ * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
+ * may have transfer limits which come from the host SCSI
+ * controller or something on the host side other than the
+ * target itself.
+ *
+ * To make this work properly, the hypervisor can adjust the
+ * target's VPD information to advertise these limits. But
+ * for that to work, the guest has to look at the VPD pages,
+ * which we won't do by default if it is an SPC-2 device, even
+ * if it does actually support it.
+ *
+ * So, set the blist to always try to read the VPD pages.
+ */
+ sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
+
+ return 0;
+}
+
+
/**
* virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
* @sdev: Virtscsi target whose queue depth to change
@@ -775,6 +798,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
@@ -795,6 +819,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 0819395a5e65..72ac5b80f13e 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2337,6 +2337,9 @@ static int slim_sched_chans(struct slim_device *sb, u32 clkgear,
int opensl1[6];
bool opensl1valid = false;
int maxctrlw1, maxctrlw3, i;
+
+ /* initialize array to zero */
+ memset(opensl1, 0x0, sizeof(opensl1));
finalexp = (ctrl->sched.chc3[last3])->rootexp;
if (last1 >= 0) {
slc1 = ctrl->sched.chc1[coeff1];
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 1e8f50c4ebad..62b7d12629e4 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -2,6 +2,13 @@
# QCOM Soc drivers
#
source "drivers/soc/qcom/hab/Kconfig"
+config MSM_PASR
+ bool "MSM DDR Partial Array Self-Refresh Driver"
+ help
+ RPM controls DDR functionality. This driver
+ is an interface from Linux memory hotplug to RPM
+ to start/stop self-refresh of hot-added or removed
+ memory in DDR.
config MSM_INRUSH_CURRENT_MITIGATION
bool "Inrush-current mitigation Driver"
@@ -705,6 +712,18 @@ config MSM_CDSP_LOADER
during boot.
Say M if you want to enable this module.
+config MSM_LPASS_RESOURCE_MANAGER
+ tristate "LPASS Resource Manager support"
+ select SND_SOC_MSM_APRV2_INTF
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || \
+ MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+ help
+ Manages the allocation of LPASS resources. It can also
+ check LPAIF for early audio playback progress.
+ To check early audio playback, the PCM registers are read;
+ if a register is enabled, playback is ongoing.
+ Say M if you want to enable this module.
+
config MSM_PERFORMANCE
tristate "msm_performance driver to support perflock request"
help
@@ -967,4 +986,11 @@ config QCOM_QDSS_BRIDGE
sub-system to USB on APSS side. The driver acts as a bridge between the
MHI and USB interface. If unsure, say N.
+config EXT_ANC
+ bool "Enable External ANC"
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3
+ help
+ This option enables support for anti-noise cancellation
+ on Sensor DSP.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ba2ff8326cac..5e565c863889 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -42,11 +42,13 @@ obj-$(CONFIG_MSM_PFE_WA) += pfe-wa.o
obj-$(CONFIG_ARCH_MSM8996) += msm_cpu_voltage.o
obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o
+obj-$(CONFIG_MSM_PASR) += pasr.o
ifdef CONFIG_MSM_SUBSYSTEM_RESTART
obj-y += subsystem_notif.o
obj-y += subsystem_restart.o
obj-y += ramdump.o
+ obj-$(CONFIG_MSM_GVM_QUIN) += subsystem_notif_virt.o
endif
obj-$(CONFIG_QPNP_HAPTIC) += qpnp-haptic.o
diff --git a/drivers/soc/qcom/boot_marker.c b/drivers/soc/qcom/boot_marker.c
index b3a6c9f8d054..0b72d769f594 100644
--- a/drivers/soc/qcom/boot_marker.c
+++ b/drivers/soc/qcom/boot_marker.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016,2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,11 +25,12 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/mm.h>
#include <soc/qcom/boot_stats.h>
#define MAX_STRING_LEN 256
#define BOOT_MARKER_MAX_LEN 40
-static struct dentry *dent_bkpi, *dent_bkpi_status;
+static struct dentry *dent_bkpi, *dent_bkpi_status, *dent_mpm_timer;
static struct boot_marker boot_marker_list;
struct boot_marker {
@@ -140,6 +141,48 @@ static const struct file_operations fops_bkpi = {
.write = bootkpi_writer,
};
+static ssize_t mpm_timer_read(struct file *fp, char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ unsigned long long int timer_value;
+ int rc = 0;
+ char buf[100];
+ int temp = 0;
+
+ timer_value = msm_timer_get_sclk_ticks();
+
+ temp = scnprintf(buf, sizeof(buf), "%llu.%03llu seconds\n",
+ timer_value/TIMER_KHZ,
+ (((timer_value % TIMER_KHZ) * 1000) / TIMER_KHZ));
+
+ rc = simple_read_from_buffer(user_buffer, count, position, buf, temp);
+
+ return rc;
+}
+
+static int mpm_timer_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int mpm_timer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ phys_addr_t addr = msm_timer_get_pa();
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return vm_iomap_memory(vma, addr, PAGE_SIZE);
+}
+
+static const struct file_operations fops_mpm_timer = {
+ .owner = THIS_MODULE,
+ .open = mpm_timer_open,
+ .read = mpm_timer_read,
+ .mmap = mpm_timer_mmap,
+};
+
static int __init init_bootkpi(void)
{
dent_bkpi = debugfs_create_dir("bootkpi", NULL);
@@ -147,7 +190,7 @@ static int __init init_bootkpi(void)
return -ENODEV;
dent_bkpi_status = debugfs_create_file("kpi_values",
- (S_IRUGO|S_IWUGO), dent_bkpi, 0, &fops_bkpi);
+ (S_IRUGO|S_IWUGO), dent_bkpi, NULL, &fops_bkpi);
if (IS_ERR_OR_NULL(dent_bkpi_status)) {
debugfs_remove(dent_bkpi);
dent_bkpi = NULL;
@@ -155,6 +198,17 @@ static int __init init_bootkpi(void)
return -ENODEV;
}
+ dent_mpm_timer = debugfs_create_file("mpm_timer",
+ S_IRUGO, dent_bkpi, NULL, &fops_mpm_timer);
+ if (IS_ERR_OR_NULL(dent_mpm_timer)) {
+ debugfs_remove(dent_bkpi_status);
+ dent_bkpi_status = NULL;
+ debugfs_remove(dent_bkpi);
+ dent_bkpi = NULL;
+ pr_err("boot_marker: Could not create 'mpm_timer' debugfs file\n");
+ return -ENODEV;
+ }
+
INIT_LIST_HEAD(&boot_marker_list.list);
mutex_init(&boot_marker_list.lock);
set_bootloader_stats();
diff --git a/drivers/soc/qcom/boot_stats.c b/drivers/soc/qcom/boot_stats.c
index eb5357e892eb..35b8108c9967 100644
--- a/drivers/soc/qcom/boot_stats.c
+++ b/drivers/soc/qcom/boot_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014,2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014,2016,2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,12 +28,14 @@
#include <soc/qcom/boot_stats.h>
static void __iomem *mpm_counter_base;
+static phys_addr_t mpm_counter_pa;
static uint32_t mpm_counter_freq;
struct boot_stats __iomem *boot_stats;
static int mpm_parse_dt(void)
{
struct device_node *np;
+ const __be32 *addrp;
u32 freq;
np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-boot_stats");
@@ -58,12 +60,19 @@ static int mpm_parse_dt(void)
else
return -ENODEV;
- if (of_get_address(np, 0, NULL, NULL)) {
+ addrp = of_get_address(np, 0, NULL, NULL);
+ if (addrp) {
mpm_counter_base = of_iomap(np, 0);
if (!mpm_counter_base) {
pr_err("mpm_counter: cant map counter base\n");
return -ENODEV;
}
+
+ mpm_counter_pa = of_translate_address(np, addrp);
+ if (mpm_counter_pa == OF_BAD_ADDR) {
+ pr_err("mpm_counter: failed to get physical address\n");
+ return -ENODEV;
+ }
}
return 0;
@@ -121,6 +130,11 @@ unsigned long long int msm_timer_get_sclk_ticks(void)
return t1;
}
+phys_addr_t msm_timer_get_pa(void)
+{
+ return mpm_counter_pa;
+}
+
int boot_stats_init(void)
{
int ret;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 4407dfbc45df..6cf4c7b6dd8a 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -171,8 +171,6 @@ struct mailbox_config_info {
* @kwork: Work to be executed when an irq is received.
* @kworker: Handle to the entity processing of
deferred commands.
- * @tasklet Handle to tasklet to process incoming data
- packets in atomic manner.
* @task: Handle to the task context used to run @kworker.
* @use_ref: Active uses of this transport use this to grab
* a reference. Used for ssr synchronization.
@@ -216,7 +214,6 @@ struct edge_info {
struct kthread_work kwork;
struct kthread_worker kworker;
struct task_struct *task;
- struct tasklet_struct tasklet;
struct srcu_struct use_ref;
bool in_ssr;
spinlock_t rx_lock;
@@ -539,6 +536,12 @@ static int fifo_write(struct edge_info *einfo, const void *data, int len)
uint32_t write_index = einfo->tx_ch_desc->write_index;
len = fifo_write_body(einfo, data, len, &write_index);
+
+ /* All data writes need to be flushed to memory before the write index
+ * is updated. This protects against a race condition where the remote
+ * reads stale data because the write index was written before the data.
+ */
+ wmb();
einfo->tx_ch_desc->write_index = write_index;
send_irq(einfo);
@@ -574,6 +577,12 @@ static int fifo_write_complex(struct edge_info *einfo,
len1 = fifo_write_body(einfo, data1, len1, &write_index);
len2 = fifo_write_body(einfo, data2, len2, &write_index);
len3 = fifo_write_body(einfo, data3, len3, &write_index);
+
+ /* All data writes need to be flushed to memory before the write index
+ * is updated. This protects against a race condition where the remote
+ * reads stale data because the write index was written before the data.
+ */
+ wmb();
einfo->tx_ch_desc->write_index = write_index;
send_irq(einfo);
@@ -1179,18 +1188,6 @@ static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
}
/**
- * rx_worker_atomic() - worker function to process received command in atomic
- * context.
- * @param: The param parameter passed during initialization of the tasklet.
- */
-static void rx_worker_atomic(unsigned long param)
-{
- struct edge_info *einfo = (struct edge_info *)param;
-
- __rx_worker(einfo, true);
-}
-
-/**
* rx_worker() - worker function to process received commands
* @work: kwork associated with the edge to process commands on.
*/
@@ -1209,7 +1206,7 @@ irqreturn_t irq_handler(int irq, void *priv)
if (einfo->rx_reset_reg)
writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);
- tasklet_hi_schedule(&einfo->tasklet);
+ __rx_worker(einfo, true);
einfo->rx_irq_count++;
return IRQ_HANDLED;
@@ -2361,7 +2358,6 @@ static int glink_smem_native_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->read_from_fifo = read_from_fifo;
einfo->write_to_fifo = write_to_fifo;
init_srcu_struct(&einfo->use_ref);
@@ -2465,7 +2461,6 @@ smem_alloc_fail:
flush_kthread_worker(&einfo->kworker);
kthread_stop(einfo->task);
einfo->task = NULL;
- tasklet_kill(&einfo->tasklet);
kthread_fail:
iounmap(einfo->out_irq_reg);
ioremap_fail:
@@ -2551,7 +2546,6 @@ static int glink_rpm_native_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->intentless = true;
einfo->read_from_fifo = memcpy32_fromio;
einfo->write_to_fifo = memcpy32_toio;
@@ -2713,7 +2707,6 @@ toc_init_fail:
flush_kthread_worker(&einfo->kworker);
kthread_stop(einfo->task);
einfo->task = NULL;
- tasklet_kill(&einfo->tasklet);
kthread_fail:
iounmap(msgram);
msgram_ioremap_fail:
@@ -2842,7 +2835,6 @@ static int glink_mailbox_probe(struct platform_device *pdev)
init_waitqueue_head(&einfo->tx_blocked_queue);
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
- tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
einfo->read_from_fifo = read_from_fifo;
einfo->write_to_fifo = write_to_fifo;
init_srcu_struct(&einfo->use_ref);
@@ -2965,7 +2957,6 @@ smem_alloc_fail:
flush_kthread_worker(&einfo->kworker);
kthread_stop(einfo->task);
einfo->task = NULL;
- tasklet_kill(&einfo->tasklet);
kthread_fail:
iounmap(einfo->rx_reset_reg);
rx_reset_ioremap_fail:
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index fe7fb1e5b925..177737f3e314 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -538,7 +538,6 @@ int notify_for_subsystem(struct subsys_info *ss_info)
* only modified during setup.
*/
atomic_set(&responses_remaining, ss_info->notify_list_len);
- init_waitqueue_head(&waitqueue);
notifications_successful = true;
list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
@@ -945,7 +944,7 @@ static int glink_ssr_probe(struct platform_device *pdev)
ss_info->cb_data = NULL;
spin_lock_init(&ss_info->link_up_lock);
spin_lock_init(&ss_info->cb_lock);
-
+ init_waitqueue_head(&waitqueue);
nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
if (!nb) {
GLINK_SSR_ERR("<SSR> %s: Could not allocate notifier block\n",
diff --git a/drivers/soc/qcom/hab/Kconfig b/drivers/soc/qcom/hab/Kconfig
index 2e4f5114e29f..2e6126f3734e 100644
--- a/drivers/soc/qcom/hab/Kconfig
+++ b/drivers/soc/qcom/hab/Kconfig
@@ -5,3 +5,7 @@ config MSM_HAB
Required for drivers to use the HAB API to communicate with the host
OS.
+config MSM_AGL
+ bool "Enable built-in hab config"
+ help
+ Use the built-in configuration to set up the hab driver.
diff --git a/drivers/soc/qcom/hab/Makefile b/drivers/soc/qcom/hab/Makefile
index 77825be16fc4..945ae52de196 100644
--- a/drivers/soc/qcom/hab/Makefile
+++ b/drivers/soc/qcom/hab/Makefile
@@ -8,8 +8,24 @@ msm_hab-objs = \
hab_mimex.o \
hab_mem_linux.o \
hab_pipe.o \
+ hab_parser.o \
+ khab_test.o
+
+ifdef CONFIG_GHS_VMM
+msm_hab_hyp-objs = \
+ ghs_comm.o \
+ hab_ghs.o
+
+ifndef CONFIG_MSM_AGL
+ccflags-y += -DHABMM_HC_VMID
+endif
+
+else
+ifdef CONFIG_MSM_GVM_QUIN
+msm_hab_hyp-objs = \
qvm_comm.o \
- hab_qvm.o \
- hab_parser.o
+ hab_qvm.o
+endif
+endif
-obj-$(CONFIG_MSM_HAB) += msm_hab.o
+obj-$(CONFIG_MSM_HAB) += msm_hab.o msm_hab_hyp.o
diff --git a/drivers/soc/qcom/hab/ghs_comm.c b/drivers/soc/qcom/hab/ghs_comm.c
new file mode 100644
index 000000000000..97d43f4c2667
--- /dev/null
+++ b/drivers/soc/qcom/hab/ghs_comm.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hab.h"
+#include "hab_ghs.h"
+
+int physical_channel_read(struct physical_channel *pchan,
+ void *payload,
+ size_t read_size)
+{
+ struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
+
+ /* size in header is only for payload excluding the header itself */
+ if (dev->read_size < read_size + sizeof(struct hab_header)) {
+ pr_warn("read %zd is less than requested %zd plus header %zd\n",
+ dev->read_size, read_size, sizeof(struct hab_header));
+ read_size = dev->read_size;
+ }
+
+ /* always skip the header */
+ memcpy(payload, (unsigned char *)dev->read_data +
+ sizeof(struct hab_header) + dev->read_offset, read_size);
+ dev->read_offset += read_size;
+
+ return read_size;
+}
+
+int physical_channel_send(struct physical_channel *pchan,
+ struct hab_header *header,
+ void *payload)
+{
+ int sizebytes = HAB_HEADER_GET_SIZE(*header);
+ struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
+ GIPC_Result result;
+ uint8_t *msg;
+
+ spin_lock_bh(&dev->io_lock);
+
+ result = GIPC_PrepareMessage(dev->endpoint, sizebytes+sizeof(*header),
+ (void **)&msg);
+ if (result == GIPC_Full) {
+ spin_unlock_bh(&dev->io_lock);
+ /* need to wait for space! */
+ pr_err("failed to reserve send msg for %zd bytes\n",
+ sizebytes+sizeof(*header));
+ return -EBUSY;
+ } else if (result != GIPC_Success) {
+ spin_unlock_bh(&dev->io_lock);
+ pr_err("failed to send due to error %d\n", result);
+ return -ENOMEM;
+ }
+
+ if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
+ struct timeval tv;
+ struct habmm_xing_vm_stat *pstat =
+ (struct habmm_xing_vm_stat *)payload;
+
+ do_gettimeofday(&tv);
+ pstat->tx_sec = tv.tv_sec;
+ pstat->tx_usec = tv.tv_usec;
+ }
+
+ memcpy(msg, header, sizeof(*header));
+
+ if (sizebytes)
+ memcpy(msg+sizeof(*header), payload, sizebytes);
+
+ result = GIPC_IssueMessage(dev->endpoint, sizebytes+sizeof(*header),
+ header->id_type_size);
+ spin_unlock_bh(&dev->io_lock);
+ if (result != GIPC_Success) {
+ pr_err("send error %d, sz %zd, prot %x\n",
+ result, sizebytes+sizeof(*header),
+ header->id_type_size);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+void physical_channel_rx_dispatch(unsigned long physical_channel)
+{
+ struct hab_header header;
+ struct physical_channel *pchan =
+ (struct physical_channel *)physical_channel;
+ struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
+ GIPC_Result result;
+
+ uint32_t events;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pchan->rxbuf_lock, flags);
+ events = kgipc_dequeue_events(dev->endpoint);
+ spin_unlock_irqrestore(&pchan->rxbuf_lock, flags);
+
+ if (events & (GIPC_EVENT_RESET))
+ pr_err("hab gipc %s remote vmid %d RESET\n",
+ dev->name, pchan->vmid_remote);
+ if (events & (GIPC_EVENT_RESETINPROGRESS))
+ pr_err("hab gipc %s remote vmid %d RESETINPROGRESS\n",
+ dev->name, pchan->vmid_remote);
+
+ if (events & (GIPC_EVENT_RECEIVEREADY)) {
+ spin_lock_bh(&pchan->rxbuf_lock);
+ while (1) {
+ dev->read_size = 0;
+ dev->read_offset = 0;
+ result = GIPC_ReceiveMessage(dev->endpoint,
+ dev->read_data,
+ GIPC_RECV_BUFF_SIZE_BYTES,
+ &dev->read_size,
+ &header.id_type_size);
+
+ if (result == GIPC_Success || dev->read_size > 0) {
+ /* handle corrupted msg? */
+ hab_msg_recv(pchan, dev->read_data);
+ continue;
+ } else if (result == GIPC_Empty) {
+ /* no more pending msg */
+ break;
+ }
+ pr_err("recv unhandled result %d, size %zd\n",
+ result, dev->read_size);
+ break;
+ }
+ spin_unlock_bh(&pchan->rxbuf_lock);
+ }
+
+ if (events & (GIPC_EVENT_SENDREADY))
+ pr_debug("kgipc send ready\n");
+}
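physical_channel_send() above writes a fixed hab_header immediately followed by the payload into one GIPC message, and physical_channel_read() skips that header and consumes the payload through a running read_offset. A self-contained sketch of the same header-then-payload framing in plain C; struct msg_header and the buffer sizes here are illustrative, not the driver's types:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct msg_header {
	uint32_t size;          /* payload bytes, excluding the header */
};

static size_t frame_msg(uint8_t *buf, const void *payload, uint32_t size)
{
	struct msg_header hdr = { .size = size };

	memcpy(buf, &hdr, sizeof(hdr));               /* header first */
	memcpy(buf + sizeof(hdr), payload, size);     /* then payload */
	return sizeof(hdr) + size;
}

int main(void)
{
	uint8_t wire[64];
	char out[16] = {0};
	size_t total = frame_msg(wire, "hello", 5);
	struct msg_header hdr;

	memcpy(&hdr, wire, sizeof(hdr));              /* read the header */
	memcpy(out, wire + sizeof(hdr), hdr.size);    /* skip it, copy payload */
	printf("framed %zu bytes, payload \"%s\"\n", total, out);
	return 0;
}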
diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c
index 5ca94579b6f1..0427845a2d14 100644
--- a/drivers/soc/qcom/hab/hab.c
+++ b/drivers/soc/qcom/hab/hab.c
@@ -16,11 +16,13 @@
.name = __name__,\
.id = __id__,\
.pchannels = LIST_HEAD_INIT(hab_devices[__num__].pchannels),\
- .pchan_lock = __MUTEX_INITIALIZER(hab_devices[__num__].pchan_lock),\
+ .pchan_lock = __SPIN_LOCK_UNLOCKED(hab_devices[__num__].pchan_lock),\
.openq_list = LIST_HEAD_INIT(hab_devices[__num__].openq_list),\
.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
}
+static const char hab_info_str[] = "Change: 16239527 Revision: #65";
+
/*
* The following has to match habmm definitions, order does not matter if
* hab config does not care either. When hab config is not present, the default
@@ -47,11 +49,15 @@ static struct hab_device hab_devices[] = {
HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 17),
HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 18),
HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 19),
+ HAB_DEVICE_CNSTR(DEVICE_FDE1_NAME, MM_FDE_1, 20),
+ HAB_DEVICE_CNSTR(DEVICE_BUFFERQ1_NAME, MM_BUFFERQ_1, 21),
};
struct hab_driver hab_driver = {
.ndevices = ARRAY_SIZE(hab_devices),
.devp = hab_devices,
+ .uctx_list = LIST_HEAD_INIT(hab_driver.uctx_list),
+ .drvlock = __SPIN_LOCK_UNLOCKED(hab_driver.drvlock),
};
struct uhab_context *hab_ctx_alloc(int kernel)
@@ -75,6 +81,7 @@ struct uhab_context *hab_ctx_alloc(int kernel)
rwlock_init(&ctx->exp_lock);
rwlock_init(&ctx->ctx_lock);
+ INIT_LIST_HEAD(&ctx->pending_open);
kref_init(&ctx->refcount);
ctx->import_ctx = habmem_imp_hyp_open();
if (!ctx->import_ctx) {
@@ -84,14 +91,53 @@ struct uhab_context *hab_ctx_alloc(int kernel)
}
ctx->kernel = kernel;
+ spin_lock_bh(&hab_driver.drvlock);
+ list_add_tail(&ctx->node, &hab_driver.uctx_list);
+ hab_driver.ctx_cnt++;
+ ctx->lb_be = hab_driver.b_loopback_be; /* loopback only */
+ hab_driver.b_loopback_be = ~hab_driver.b_loopback_be; /* loopback only*/
+ spin_unlock_bh(&hab_driver.drvlock);
+ pr_debug("ctx %pK live %d loopback be %d\n",
+ ctx, hab_driver.ctx_cnt, ctx->lb_be);
+
return ctx;
}
+/* ctx can only be freed when all the vchan releases the refcnt */
void hab_ctx_free(struct kref *ref)
{
struct uhab_context *ctx =
container_of(ref, struct uhab_context, refcount);
struct hab_export_ack_recvd *ack_recvd, *tmp;
+ struct virtual_channel *vchan;
+ struct physical_channel *pchan;
+ int i;
+ struct uhab_context *ctxdel, *ctxtmp;
+ struct hab_open_node *node;
+ struct export_desc *exp, *exp_tmp;
+
+ /* garbage-collect exp/imp buffers */
+ write_lock(&ctx->exp_lock);
+ list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
+ list_del(&exp->node);
+ pr_err("potential leak exp %d vcid %X recovered\n",
+ exp->export_id, exp->vcid_local);
+ habmem_hyp_revoke(exp->payload, exp->payload_count);
+ habmem_remove_export(exp);
+ }
+ write_unlock(&ctx->exp_lock);
+
+ spin_lock_bh(&ctx->imp_lock);
+ list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
+ list_del(&exp->node);
+ ctx->import_total--;
+ pr_warn("leaked imp %d vcid %X for ctx is collected total %d\n",
+ exp->export_id, exp->vcid_local,
+ ctx->import_total);
+ habmm_imp_hyp_unmap(ctx->import_ctx, exp, ctx->kernel);
+ kfree(exp);
+ }
+ spin_unlock_bh(&ctx->imp_lock);
habmem_imp_hyp_close(ctx->import_ctx, ctx->kernel);
@@ -100,9 +146,70 @@ void hab_ctx_free(struct kref *ref)
kfree(ack_recvd);
}
+ /* walk vchan list to find the leakage */
+ spin_lock_bh(&hab_driver.drvlock);
+ hab_driver.ctx_cnt--;
+ list_for_each_entry_safe(ctxdel, ctxtmp, &hab_driver.uctx_list, node) {
+ if (ctxdel == ctx)
+ list_del(&ctxdel->node);
+ }
+ spin_unlock_bh(&hab_driver.drvlock);
+ pr_debug("live ctx %d refcnt %d kernel %d close %d owner %d\n",
+ hab_driver.ctx_cnt, get_refcnt(ctx->refcount),
+ ctx->kernel, ctx->closing, ctx->owner);
+
+ /* check vchans in this ctx */
+ write_lock(&ctx->ctx_lock);
+ list_for_each_entry(vchan, &ctx->vchannels, node) {
+ pr_warn("leak vchan id %X cnt %X remote %d in ctx\n",
+ vchan->id, get_refcnt(vchan->refcount),
+ vchan->otherend_id);
+ }
+ write_unlock(&ctx->ctx_lock);
+
+ /* check pending open */
+ if (ctx->pending_cnt)
+ pr_warn("potential leak of pendin_open nodes %d\n",
+ ctx->pending_cnt);
+
+ write_lock(&ctx->ctx_lock);
+ list_for_each_entry(node, &ctx->pending_open, node) {
+ pr_warn("leak pending open vcid %X type %d subid %d openid %d\n",
+ node->request.xdata.vchan_id, node->request.type,
+ node->request.xdata.sub_id,
+ node->request.xdata.open_id);
+ }
+ write_unlock(&ctx->ctx_lock);
+
+ /* check vchans belong to this ctx in all hab/mmid devices */
+ for (i = 0; i < hab_driver.ndevices; i++) {
+ struct hab_device *habdev = &hab_driver.devp[i];
+
+ spin_lock_bh(&habdev->pchan_lock);
+ list_for_each_entry(pchan, &habdev->pchannels, node) {
+
+ /* check vchan ctx owner */
+ write_lock(&pchan->vchans_lock);
+ list_for_each_entry(vchan, &pchan->vchannels, pnode) {
+ if (vchan->ctx == ctx) {
+ pr_warn("leak vcid %X cnt %d pchan %s local %d remote %d\n",
+ vchan->id,
+ get_refcnt(vchan->refcount),
+ pchan->name, pchan->vmid_local,
+ pchan->vmid_remote);
+ }
+ }
+ write_unlock(&pchan->vchans_lock);
+ }
+ spin_unlock_bh(&habdev->pchan_lock);
+ }
kfree(ctx);
}
+/*
+ * caller needs to call vchan_put() afterwards. this is used to refcnt
+ * the local ioctl access based on ctx
+ */
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
struct uhab_context *ctx)
{
@@ -120,7 +227,7 @@ struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
return NULL;
}
-static struct hab_device *find_hab_device(unsigned int mm_id)
+struct hab_device *find_hab_device(unsigned int mm_id)
{
int i;
@@ -138,14 +245,14 @@ static struct hab_device *find_hab_device(unsigned int mm_id)
* frontend backend
* send(INIT) wait(INIT)
* wait(INIT_ACK) send(INIT_ACK)
- * send(ACK) wait(ACK)
+ * send(INIT_DONE) wait(INIT_DONE)
*/
struct virtual_channel *frontend_open(struct uhab_context *ctx,
unsigned int mm_id,
int dom_id)
{
- int ret, open_id = 0;
+ int ret, ret2, open_id = 0;
struct physical_channel *pchan = NULL;
struct hab_device *dev;
struct virtual_channel *vchan = NULL;
@@ -153,6 +260,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
struct hab_open_request request;
struct hab_open_request *recv_request;
int sub_id = HAB_MMID_GET_MINOR(mm_id);
+ struct hab_open_node pending_open = { { 0 } };
dev = find_hab_device(mm_id);
if (dev == NULL) {
@@ -161,6 +269,7 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
goto err;
}
+ /* guest can find its own id */
pchan = hab_pchan_find_domid(dev, dom_id);
if (!pchan) {
pr_err("hab_pchan_find_domid failed: dom_id=%d\n", dom_id);
@@ -168,44 +277,82 @@ struct virtual_channel *frontend_open(struct uhab_context *ctx,
goto err;
}
- vchan = hab_vchan_alloc(ctx, pchan);
+ open_id = atomic_inc_return(&open_id_counter);
+ vchan = hab_vchan_alloc(ctx, pchan, open_id);
if (!vchan) {
pr_err("vchan alloc failed\n");
ret = -ENOMEM;
goto err;
- }
+ } else
/* Send Init sequence */
- open_id = atomic_inc_return(&open_id_counter);
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT, pchan,
vchan->id, sub_id, open_id);
+ request.xdata.ver_fe = HAB_API_VER;
ret = hab_open_request_send(&request);
if (ret) {
pr_err("hab_open_request_send failed: %d\n", ret);
goto err;
}
+ pending_open.request = request;
+
+ /* during wait app could be terminated */
+ hab_open_pending_enter(ctx, pchan, &pending_open);
+
/* Wait for Init-Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, pchan,
0, sub_id, open_id);
+ /* wait forever */
ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
- if (ret || !recv_request) {
- pr_err("hab_open_listen failed: %d\n", ret);
+ if (!ret && recv_request && ((recv_request->xdata.ver_fe & 0xFFFF0000)
+ != (recv_request->xdata.ver_be & 0xFFFF0000))) {
+ /* version check */
+ pr_err("hab major version mismatch fe %X be %X on mmid %d\n",
+ recv_request->xdata.ver_fe,
+ recv_request->xdata.ver_be, mm_id);
+
+ hab_open_pending_exit(ctx, pchan, &pending_open);
+ ret = -EPROTO;
+ goto err;
+ } else if (ret || !recv_request) {
+ pr_err("hab_open_listen failed: %d, send cancel vcid %x subid %d openid %d\n",
+ ret, vchan->id,
+ sub_id, open_id);
+ /* send cancel to BE due to FE's local close */
+ hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_CANCEL,
+ pchan, vchan->id, sub_id, open_id);
+ request.xdata.ver_fe = HAB_API_VER;
+ ret2 = hab_open_request_send(&request);
+ if (ret2)
+ pr_err("send init_cancel failed %d on vcid %x\n", ret2,
+ vchan->id);
+ hab_open_pending_exit(ctx, pchan, &pending_open);
+
+ ret = -EINVAL;
goto err;
}
- vchan->otherend_id = recv_request->vchan_id;
- hab_open_request_free(recv_request);
+ /* remove pending open locally after good pairing */
+ hab_open_pending_exit(ctx, pchan, &pending_open);
+
+ pr_debug("hab version match fe %X be %X on mmid %d\n",
+ recv_request->xdata.ver_fe, recv_request->xdata.ver_be,
+ mm_id);
- vchan->session_id = open_id;
- pr_debug("vchan->session_id:%d\n", vchan->session_id);
+ vchan->otherend_id = recv_request->xdata.vchan_id;
+ hab_open_request_free(recv_request);
/* Send Ack sequence */
- hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan,
+ hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_DONE, pchan,
0, sub_id, open_id);
+ request.xdata.ver_fe = HAB_API_VER;
ret = hab_open_request_send(&request);
- if (ret)
+ if (ret) {
+ pr_err("failed to send init-done vcid %x remote %x openid %d\n",
+ vchan->id, vchan->otherend_id, vchan->session_id);
goto err;
+ }
hab_pchan_put(pchan);
@@ -220,10 +367,10 @@ err:
}
struct virtual_channel *backend_listen(struct uhab_context *ctx,
- unsigned int mm_id)
+ unsigned int mm_id, int timeout)
{
- int ret;
- int open_id;
+ int ret, ret2;
+ int open_id, ver_fe;
int sub_id = HAB_MMID_GET_MINOR(mm_id);
struct physical_channel *pchan = NULL;
struct hab_device *dev;
@@ -231,6 +378,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
struct hab_open_request request;
struct hab_open_request *recv_request;
uint32_t otherend_vchan_id;
+ struct hab_open_node pending_open = { { 0 } };
dev = find_hab_device(mm_id);
if (dev == NULL) {
@@ -243,19 +391,50 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
/* Wait for Init sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT,
NULL, 0, sub_id, 0);
- ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
+ /* cancel should not happen at this moment */
+ ret = hab_open_listen(ctx, dev, &request, &recv_request,
+ timeout);
if (ret || !recv_request) {
- pr_err("hab_open_listen failed: %d\n", ret);
+ if (!ret && !recv_request)
+ ret = -EINVAL;
+ if (-EAGAIN == ret) {
+ ret = -ETIMEDOUT;
+ } else {
+ /* device is closed */
+ pr_err("open request wait failed ctx closing %d\n",
+ ctx->closing);
+ }
+ goto err;
+ } else if (!ret && recv_request &&
+ ((recv_request->xdata.ver_fe & 0xFFFF0000) !=
+ (HAB_API_VER & 0xFFFF0000))) {
+ int ret2;
+ /* version check */
+ pr_err("version mismatch fe %X be %X on mmid %d\n",
+ recv_request->xdata.ver_fe, HAB_API_VER, mm_id);
+ hab_open_request_init(&request,
+ HAB_PAYLOAD_TYPE_INIT_ACK,
+ NULL, 0, sub_id, recv_request->xdata.open_id);
+ request.xdata.ver_be = HAB_API_VER;
+ /* reply to allow FE to bail out */
+ ret2 = hab_open_request_send(&request);
+ if (ret2)
+ pr_err("send FE version mismatch failed mmid %d sub %d\n",
+ mm_id, sub_id);
+ ret = -EPROTO;
goto err;
}
- otherend_vchan_id = recv_request->vchan_id;
- open_id = recv_request->open_id;
+ /* guest id from guest */
+ otherend_vchan_id = recv_request->xdata.vchan_id;
+ open_id = recv_request->xdata.open_id;
+ ver_fe = recv_request->xdata.ver_fe;
pchan = recv_request->pchan;
hab_pchan_get(pchan);
hab_open_request_free(recv_request);
+ recv_request = NULL;
- vchan = hab_vchan_alloc(ctx, pchan);
+ vchan = hab_vchan_alloc(ctx, pchan, open_id);
if (!vchan) {
ret = -ENOMEM;
goto err;
@@ -263,23 +442,64 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
vchan->otherend_id = otherend_vchan_id;
- vchan->session_id = open_id;
- pr_debug("vchan->session_id:%d\n", vchan->session_id);
-
/* Send Init-Ack sequence */
hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
pchan, vchan->id, sub_id, open_id);
+ request.xdata.ver_fe = ver_fe; /* carry over */
+ request.xdata.ver_be = HAB_API_VER;
ret = hab_open_request_send(&request);
if (ret)
goto err;
+ pending_open.request = request;
+ /* wait only after init-ack is sent */
+ hab_open_pending_enter(ctx, pchan, &pending_open);
+
/* Wait for Ack sequence */
- hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK,
+ hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_DONE,
pchan, 0, sub_id, open_id);
- ret = hab_open_listen(ctx, dev, &request, &recv_request, 0);
-
- if (ret != -EAGAIN)
+ ret = hab_open_listen(ctx, dev, &request, &recv_request,
+ HAB_HS_TIMEOUT);
+ hab_open_pending_exit(ctx, pchan, &pending_open);
+ if (ret && recv_request &&
+ recv_request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) {
+ pr_err("listen cancelled vcid %x subid %d openid %d ret %d\n",
+ request.xdata.vchan_id, request.xdata.sub_id,
+ request.xdata.open_id, ret);
+
+ /* FE cancels this session.
+ * So BE has to cancel its session too
+ */
+ hab_open_request_init(&request,
+ HAB_PAYLOAD_TYPE_INIT_CANCEL, pchan,
+ vchan->id, sub_id, open_id);
+ ret2 = hab_open_request_send(&request);
+ if (ret2)
+ pr_err("send init_ack failed %d on vcid %x\n",
+ ret2, vchan->id);
+ hab_open_pending_exit(ctx, pchan, &pending_open);
+
+ ret = -ENODEV; /* open request cancelled remotely */
break;
+ } else if (ret != -EAGAIN) {
+ hab_open_pending_exit(ctx, pchan, &pending_open);
+ break; /* received something. good case! */
+ }
+
+ /* stay in the loop retry */
+ pr_warn("retry open ret %d vcid %X remote %X sub %d open %d\n",
+ ret, vchan->id, vchan->otherend_id, sub_id, open_id);
+
+ /* retry path starting here. free previous vchan */
+ hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_CANCEL,
+ pchan, vchan->id, sub_id, open_id);
+ request.xdata.ver_fe = ver_fe;
+ request.xdata.ver_be = HAB_API_VER;
+ ret2 = hab_open_request_send(&request);
+ if (ret2)
+ pr_err("send init_ack failed %d on vcid %x\n", ret2,
+ vchan->id);
+ hab_open_pending_exit(ctx, pchan, &pending_open);
hab_vchan_put(vchan);
vchan = NULL;
@@ -288,7 +508,7 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
}
if (ret || !recv_request) {
- pr_err("backend_listen failed: %d\n", ret);
+ pr_err("backend mmid %d listen error %d\n", mm_id, ret);
ret = -EINVAL;
goto err;
}
@@ -297,7 +517,8 @@ struct virtual_channel *backend_listen(struct uhab_context *ctx,
hab_pchan_put(pchan);
return vchan;
err:
- pr_err("listen on mmid %d failed\n", mm_id);
+ if (ret != -ETIMEDOUT)
+ pr_err("listen on mmid %d failed\n", mm_id);
if (vchan)
hab_vchan_put(vchan);
if (pchan)
@@ -316,8 +537,9 @@ long hab_vchan_send(struct uhab_context *ctx,
struct hab_header header = HAB_HEADER_INITIALIZER;
int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING;
- if (sizebytes > HAB_MAX_MSG_SIZEBYTES) {
- pr_err("Message too large, %lu bytes\n", sizebytes);
+ if (sizebytes > HAB_HEADER_SIZE_MASK) {
+ pr_err("Message too large, %lu bytes, max is %d\n",
+ sizebytes, HAB_HEADER_SIZE_MASK);
return -EINVAL;
}
@@ -328,11 +550,17 @@ long hab_vchan_send(struct uhab_context *ctx,
}
HAB_HEADER_SET_SIZE(header, sizebytes);
- if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT)
+ if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT) {
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE);
- else
+ if (sizebytes < sizeof(struct habmm_xing_vm_stat)) {
+ pr_err("wrong profiling buffer size %zd, expect %zd\n",
+ sizebytes,
+ sizeof(struct habmm_xing_vm_stat));
+ return -EINVAL;
+ }
+ } else {
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
-
+ }
HAB_HEADER_SET_ID(header, vchan->otherend_id);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
@@ -345,8 +573,6 @@ long hab_vchan_send(struct uhab_context *ctx,
schedule();
}
-
-
err:
if (vchan)
hab_vchan_put(vchan);
@@ -354,18 +580,21 @@ err:
return ret;
}
-struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
+int hab_vchan_recv(struct uhab_context *ctx,
+ struct hab_message **message,
int vcid,
+ int *rsize,
unsigned int flags)
{
struct virtual_channel *vchan;
- struct hab_message *message;
int ret = 0;
int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;
vchan = hab_get_vchan_fromvcid(vcid, ctx);
- if (!vchan)
- return ERR_PTR(-ENODEV);
+ if (!vchan) {
+ pr_err("vcid %X, vchan %p ctx %p\n", vcid, vchan, ctx);
+ return -ENODEV;
+ }
if (nonblocking_flag) {
/*
@@ -376,18 +605,18 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
physical_channel_rx_dispatch((unsigned long) vchan->pchan);
}
- message = hab_msg_dequeue(vchan, !nonblocking_flag);
- if (!message) {
+ ret = hab_msg_dequeue(vchan, message, rsize, flags);
+ if (!(*message)) {
if (nonblocking_flag)
ret = -EAGAIN;
else if (vchan->otherend_closed)
ret = -ENODEV;
- else
- ret = -EPIPE;
+ else if (ret == -ERESTARTSYS)
+ ret = -EINTR;
}
hab_vchan_put(vchan);
- return ret ? ERR_PTR(ret) : message;
+ return ret;
}
bool hab_is_loopback(void)
@@ -398,23 +627,22 @@ bool hab_is_loopback(void)
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid,
int32_t *vcid,
+ int32_t timeout,
uint32_t flags)
{
struct virtual_channel *vchan = NULL;
struct hab_device *dev;
- pr_debug("Open mmid=%d, loopback mode=%d, loopback num=%d\n",
- mmid, hab_driver.b_loopback, hab_driver.loopback_num);
+ pr_debug("Open mmid=%d, loopback mode=%d, loopback be ctx %d\n",
+ mmid, hab_driver.b_loopback, ctx->lb_be);
if (!vcid)
return -EINVAL;
if (hab_is_loopback()) {
- if (!hab_driver.loopback_num) {
- hab_driver.loopback_num = 1;
- vchan = backend_listen(ctx, mmid);
+ if (ctx->lb_be) {
+ vchan = backend_listen(ctx, mmid, timeout);
} else {
- hab_driver.loopback_num = 0;
vchan = frontend_open(ctx, mmid, LOOPBACK_DOM);
}
} else {
@@ -422,28 +650,37 @@ int hab_vchan_open(struct uhab_context *ctx,
if (dev) {
struct physical_channel *pchan =
- hab_pchan_find_domid(dev, HABCFG_VMID_DONT_CARE);
-
- if (pchan->is_be)
- vchan = backend_listen(ctx, mmid);
- else
- vchan = frontend_open(ctx, mmid,
- HABCFG_VMID_DONT_CARE);
+ hab_pchan_find_domid(dev,
+ HABCFG_VMID_DONT_CARE);
+ if (pchan) {
+ if (pchan->is_be)
+ vchan = backend_listen(ctx, mmid,
+ timeout);
+ else
+ vchan = frontend_open(ctx, mmid,
+ HABCFG_VMID_DONT_CARE);
+ } else {
+ pr_err("open on nonexistent pchan (mmid %x)",
+ mmid);
+ return -ENODEV;
+ }
} else {
pr_err("failed to find device, mmid %d\n", mmid);
}
}
if (IS_ERR(vchan)) {
- pr_err("vchan open failed over mmid=%d\n", mmid);
+ if (-ETIMEDOUT != PTR_ERR(vchan) && -EAGAIN != PTR_ERR(vchan))
+ pr_err("vchan open failed mmid=%d\n", mmid);
return PTR_ERR(vchan);
}
- pr_debug("vchan id %x, remote id %x\n",
- vchan->id, vchan->otherend_id);
+ pr_debug("vchan id %x remote id %x session %d\n", vchan->id,
+ vchan->otherend_id, vchan->session_id);
write_lock(&ctx->ctx_lock);
list_add_tail(&vchan->node, &ctx->vchannels);
+ ctx->vcnt++;
write_unlock(&ctx->ctx_lock);
*vcid = vchan->id;
@@ -464,17 +701,6 @@ void hab_send_close_msg(struct virtual_channel *vchan)
}
}
-static void hab_vchan_close_impl(struct kref *ref)
-{
- struct virtual_channel *vchan =
- container_of(ref, struct virtual_channel, usagecnt);
-
- list_del(&vchan->node);
- hab_vchan_stop_notify(vchan);
- hab_vchan_put(vchan);
-}
-
-
void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
{
struct virtual_channel *vchan, *tmp;
@@ -485,11 +711,29 @@ void hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
if (vchan->id == vcid) {
- kref_put(&vchan->usagecnt, hab_vchan_close_impl);
+ write_unlock(&ctx->ctx_lock);
+ pr_debug("vcid %x remote %x session %d refcnt %d\n",
+ vchan->id, vchan->otherend_id,
+ vchan->session_id, get_refcnt(vchan->refcount));
+ /*
+ * only set when vc close is called locally by the user
+ * explicitly. Used to block remote msgs. If forked once
+ * before, this local close is skipped due to child
+ * usage. If forked but not closed locally, the local
+ * context could NOT be closed, and the vchan can be
+ * prolonged by arriving remote msgs
+ */
+ if (vchan->forked)
+ vchan->forked = 0;
+ else {
+ vchan->closed = 1;
+ hab_vchan_stop_notify(vchan);
+ }
+ hab_vchan_put(vchan); /* there is a lock inside */
+ write_lock(&ctx->ctx_lock);
break;
}
}
-
write_unlock(&ctx->ctx_lock);
}
@@ -506,7 +750,7 @@ static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
char pchan_name[MAX_VMID_NAME_SIZE];
struct physical_channel *pchan = NULL;
int ret;
- int vmid = is_be ? vmid_remote : vmid_local;
+ int vmid = is_be ? vmid_remote : vmid_local; /* used for naming only */
if (!mmid_device) {
pr_err("habdev %pK, vmid local %d, remote %d, is be %d\n",
@@ -519,20 +763,28 @@ static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
ret = habhyp_commdev_alloc((void **)&pchan, is_be, pchan_name,
vmid_remote, mmid_device);
- if (ret == 0) {
- pr_debug("pchan %s added, vmid local %d, remote %d, is_be %d, total %d\n",
- pchan_name, vmid_local, vmid_remote, is_be,
- mmid_device->pchan_cnt);
- } else {
+ if (ret) {
pr_err("failed %d to allocate pchan %s, vmid local %d, remote %d, is_be %d, total %d\n",
ret, pchan_name, vmid_local, vmid_remote,
is_be, mmid_device->pchan_cnt);
+ } else {
+ /* local/remote id setting should be kept in lower level */
+ pchan->vmid_local = vmid_local;
+ pchan->vmid_remote = vmid_remote;
+ pr_debug("pchan %s mmid %s local %d remote %d role %d\n",
+ pchan_name, mmid_device->name,
+ pchan->vmid_local, pchan->vmid_remote,
+ pchan->dom_id);
}
return ret;
}
-static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
+/*
+ * generate pchan list based on hab settings table.
+ * return status 0: success, otherwise failure
+ */
+static int hab_generate_pchan(struct local_vmid *settings, int i, int j)
{
int k, ret = 0;
@@ -624,13 +876,31 @@ static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
HABCFG_GET_BE(settings, i, j));
}
break;
-
+ case MM_FDE_START/100:
+ for (k = MM_FDE_START + 1; k < MM_FDE_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
+ case MM_BUFFERQ_START/100:
+ for (k = MM_BUFFERQ_START + 1; k < MM_BUFFERQ_END; k++) {
+ ret += hab_initialize_pchan_entry(
+ find_hab_device(k),
+ settings->self,
+ HABCFG_GET_VMID(settings, i),
+ HABCFG_GET_BE(settings, i, j));
+ }
+ break;
default:
pr_err("failed to find mmid %d, i %d, j %d\n",
HABCFG_GET_MMID(settings, i, j), i, j);
break;
}
+ return ret;
}
/*
@@ -639,7 +909,7 @@ static void hab_generate_pchan(struct local_vmid *settings, int i, int j)
*/
static int hab_generate_pchan_list(struct local_vmid *settings)
{
- int i, j;
+ int i, j, ret = 0;
/* scan by valid VMs, then mmid */
pr_debug("self vmid is %d\n", settings->self);
@@ -651,24 +921,34 @@ static int hab_generate_pchan_list(struct local_vmid *settings)
for (j = 1; j <= HABCFG_MMID_AREA_MAX; j++) {
if (HABCFG_GET_MMID(settings, i, j)
!= HABCFG_VMID_INVALID)
- hab_generate_pchan(settings, i, j);
+ ret = hab_generate_pchan(settings,
+ i, j);
}
}
}
-
- return 0;
+ return ret;
}
/*
* This function checks hypervisor plug-in readiness, read in hab configs,
* and configure pchans
*/
+#ifdef HABMM_HC_VMID
+#define DEFAULT_GVMID 3
+#else
+#define DEFAULT_GVMID 2
+#endif
+
int do_hab_parse(void)
{
int result;
int i;
struct hab_device *device;
- int pchan_total = 0;
+
+ /* single GVM is 2, multigvm is 2 or 3. GHS LV-GVM 2, LA-GVM 3 */
+ int default_gvmid = DEFAULT_GVMID;
+
+ pr_debug("hab parse starts for %s\n", hab_info_str);
/* first check if hypervisor plug-in is ready */
result = hab_hypervisor_register();
@@ -677,7 +957,10 @@ int do_hab_parse(void)
return result;
}
- /* Initialize open Q before first pchan starts */
+ /*
+ * Initialize open Q before first pchan starts.
+ * Each is for one pchan list
+ */
for (i = 0; i < hab_driver.ndevices; i++) {
device = &hab_driver.devp[i];
init_waitqueue_head(&device->openq);
@@ -686,12 +969,12 @@ int do_hab_parse(void)
/* read in hab config and create pchans*/
memset(&hab_driver.settings, HABCFG_VMID_INVALID,
sizeof(hab_driver.settings));
-
result = hab_parse(&hab_driver.settings);
if (result) {
- pr_warn("hab_parse failed and use the default settings\n");
- fill_default_gvm_settings(&hab_driver.settings, 2,
- MM_AUD_START, MM_ID_MAX);
+ pr_err("hab config open failed, prepare default gvm %d settings\n",
+ default_gvmid);
+ fill_default_gvm_settings(&hab_driver.settings, default_gvmid,
+ MM_AUD_START, MM_ID_MAX);
}
/* now generate hab pchan list */
@@ -699,6 +982,7 @@ int do_hab_parse(void)
if (result) {
pr_err("generate pchan list failed, ret %d\n", result);
} else {
+ int pchan_total = 0;
for (i = 0; i < hab_driver.ndevices; i++) {
device = &hab_driver.devp[i];
pchan_total += device->pchan_cnt;
@@ -710,6 +994,48 @@ int do_hab_parse(void)
return result;
}
+int get_refcnt(struct kref ref)
+{
+ return ref.refcount.counter;
+}
+
+void hab_hypervisor_unregister_common(void)
+{
+ int status, i;
+ struct uhab_context *ctx;
+ struct virtual_channel *vchan;
+
+ for (i = 0; i < hab_driver.ndevices; i++) {
+ struct hab_device *habdev = &hab_driver.devp[i];
+ struct physical_channel *pchan, *pchan_tmp;
+
+ list_for_each_entry_safe(pchan, pchan_tmp,
+ &habdev->pchannels, node) {
+ status = habhyp_commdev_dealloc(pchan);
+ if (status) {
+ pr_err("failed to free pchan %pK, i %d, ret %d\n",
+ pchan, i, status);
+ }
+ }
+ }
+
+ /* detect leaking uctx */
+ spin_lock_bh(&hab_driver.drvlock);
+ list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
+ pr_warn("leaking ctx owner %d refcnt %d kernel %d\n",
+ ctx->owner, get_refcnt(ctx->refcount), ctx->kernel);
+ /* further check vchan leak */
+ read_lock(&ctx->ctx_lock);
+ list_for_each_entry(vchan, &ctx->vchannels, node) {
+ pr_warn("leaking vchan id %X remote %X refcnt %d\n",
+ vchan->id, vchan->otherend_id,
+ get_refcnt(vchan->refcount));
+ }
+ read_unlock(&ctx->ctx_lock);
+ }
+ spin_unlock_bh(&hab_driver.drvlock);
+}
+
static int hab_open(struct inode *inodep, struct file *filep)
{
int result = 0;
@@ -723,7 +1049,10 @@ static int hab_open(struct inode *inodep, struct file *filep)
return -ENOMEM;
}
+ ctx->owner = task_pid_nr(current);
filep->private_data = ctx;
+ pr_debug("ctx owner %d refcnt %d\n", ctx->owner,
+ get_refcnt(ctx->refcount));
return result;
}
@@ -732,25 +1061,50 @@ static int hab_release(struct inode *inodep, struct file *filep)
{
struct uhab_context *ctx = filep->private_data;
struct virtual_channel *vchan, *tmp;
+ struct hab_open_node *node;
if (!ctx)
return 0;
- pr_debug("inode %pK, filep %pK\n", inodep, filep);
+ pr_debug("inode %pK, filep %pK ctx %pK\n", inodep, filep, ctx);
write_lock(&ctx->ctx_lock);
-
+ /* notify remote side on vchan closing */
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
- list_del(&vchan->node);
+ list_del(&vchan->node); /* vchan is not in this ctx anymore */
hab_vchan_stop_notify(vchan);
- hab_vchan_put(vchan);
+ write_unlock(&ctx->ctx_lock);
+ if (!vchan->closed) {
+ pr_warn("potential leak vc %pK %x remote %x session %d refcnt %d\n",
+ vchan, vchan->id, vchan->otherend_id,
+ vchan->session_id,
+ get_refcnt(vchan->refcount));
+ hab_vchan_put(vchan); /* there is a lock inside */
+ }
+ write_lock(&ctx->ctx_lock);
}
+ /* notify remote side on pending open */
+ list_for_each_entry(node, &ctx->pending_open, node) {
+ /* do not touch the list itself; nodes are allocated on the stack */
+ if (hab_open_cancel_notify(&node->request))
+ pr_err("failed to send open cancel vcid %x subid %d openid %d pchan %s\n",
+ node->request.xdata.vchan_id,
+ node->request.xdata.sub_id,
+ node->request.xdata.open_id,
+ node->request.pchan->habdev->name);
+ }
write_unlock(&ctx->ctx_lock);
hab_ctx_put(ctx);
filep->private_data = NULL;
+ /* ctx leak check */
+ if (get_refcnt(ctx->refcount))
+ pr_warn("pending ctx release owner %d refcnt %d total %d\n",
+ ctx->owner, get_refcnt(ctx->refcount),
+ hab_driver.ctx_cnt);
+
return 0;
}
@@ -761,10 +1115,12 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
struct hab_close *close_param;
struct hab_recv *recv_param;
struct hab_send *send_param;
+ struct hab_info *info_param;
struct hab_message *msg;
void *send_data;
unsigned char data[256] = { 0 };
long ret = 0;
+ char names[30];
if (_IOC_SIZE(cmd) && (cmd & IOC_IN)) {
if (_IOC_SIZE(cmd) > sizeof(data))
@@ -781,7 +1137,9 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case IOCTL_HAB_VC_OPEN:
open_param = (struct hab_open *)data;
ret = hab_vchan_open(ctx, open_param->mmid,
- &open_param->vcid, open_param->flags);
+ &open_param->vcid,
+ open_param->timeout,
+ open_param->flags);
break;
case IOCTL_HAB_VC_CLOSE:
close_param = (struct hab_close *)data;
@@ -818,29 +1176,25 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
break;
}
- msg = hab_vchan_recv(ctx, recv_param->vcid, recv_param->flags);
+ ret = hab_vchan_recv(ctx, &msg, recv_param->vcid,
+ &recv_param->sizebytes, recv_param->flags);
- if (IS_ERR(msg)) {
- recv_param->sizebytes = 0;
- ret = PTR_ERR(msg);
- break;
- }
-
- if (recv_param->sizebytes < msg->sizebytes) {
- recv_param->sizebytes = 0;
- ret = -EINVAL;
- } else if (copy_to_user((void __user *)recv_param->data,
+ if (ret == 0 && msg) {
+ if (copy_to_user((void __user *)recv_param->data,
msg->data,
msg->sizebytes)) {
- pr_err("copy_to_user failed: vc=%x size=%d\n",
- recv_param->vcid, (int)msg->sizebytes);
- recv_param->sizebytes = 0;
- ret = -EFAULT;
- } else {
- recv_param->sizebytes = msg->sizebytes;
+ pr_err("copy_to_user failed: vc=%x size=%d\n",
+ recv_param->vcid, (int)msg->sizebytes);
+ recv_param->sizebytes = 0;
+ ret = -EFAULT;
+ }
+ } else if (ret && msg) {
+ pr_warn("vcid %X recv failed %d and msg is still of %zd bytes\n",
+ recv_param->vcid, (int)ret, msg->sizebytes);
}
- hab_msg_free(msg);
+ if (msg)
+ hab_msg_free(msg);
break;
case IOCTL_HAB_VC_EXPORT:
ret = hab_mem_export(ctx, (struct hab_export *)data, 0);
@@ -854,11 +1208,36 @@ static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case IOCTL_HAB_VC_UNIMPORT:
ret = hab_mem_unimport(ctx, (struct hab_unimport *)data, 0);
break;
+ case IOCTL_HAB_VC_QUERY:
+ info_param = (struct hab_info *)data;
+ if (!info_param->names || !info_param->namesize ||
+ info_param->namesize > sizeof(names)) {
+ pr_err("wrong param for vm info vcid %X, names %llX, sz %d\n",
+ info_param->vcid, info_param->names,
+ info_param->namesize);
+ ret = -EINVAL;
+ break;
+ }
+ ret = hab_vchan_query(ctx, info_param->vcid,
+ (uint64_t *)&info_param->ids,
+ names, info_param->namesize, 0);
+ if (!ret) {
+ if (copy_to_user((void __user *)info_param->names,
+ names,
+ info_param->namesize)) {
+ pr_err("copy_to_user failed: vc=%x size=%d\n",
+ info_param->vcid,
+ info_param->namesize*2);
+ info_param->namesize = 0;
+ ret = -EFAULT;
+ }
+ }
+ break;
default:
ret = -ENOIOCTLCMD;
}
- if (ret == 0 && _IOC_SIZE(cmd) && (cmd & IOC_OUT))
+ if (_IOC_SIZE(cmd) && (cmd & IOC_OUT))
if (copy_to_user((void __user *) arg, data, _IOC_SIZE(cmd))) {
pr_err("copy_to_user failed: cmd=%x\n", cmd);
ret = -EFAULT;
@@ -909,6 +1288,26 @@ static const struct dma_map_ops hab_dma_ops = {
.unmap_sg = hab_unmap_sg,
};
+static int hab_power_down_callback(
+ struct notifier_block *nfb, unsigned long action, void *data)
+{
+
+ switch (action) {
+ case SYS_DOWN:
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ pr_debug("reboot called %ld\n", action);
+ hab_hypervisor_unregister(); /* only for single VM guest */
+ break;
+ }
+ pr_debug("reboot called %ld done\n", action);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hab_reboot_notifier = {
+ .notifier_call = hab_power_down_callback,
+};
+
static int __init hab_init(void)
{
int result;
@@ -951,6 +1350,10 @@ static int __init hab_init(void)
goto err;
}
+ result = register_reboot_notifier(&hab_reboot_notifier);
+ if (result)
+ pr_err("failed to register reboot notifier %d\n", result);
+
/* read in hab config, then configure pchans */
result = do_hab_parse();
@@ -961,12 +1364,10 @@ static int __init hab_init(void)
result = -ENOMEM;
hab_hypervisor_unregister();
goto err;
- }
-
- set_dma_ops(hab_driver.dev, &hab_dma_ops);
-
- return result;
+ } else
+ set_dma_ops(hab_driver.dev, &hab_dma_ops);
}
+ return result;
err:
if (!IS_ERR_OR_NULL(hab_driver.dev))
@@ -991,6 +1392,8 @@ static void __exit hab_exit(void)
class_destroy(hab_driver.class);
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
+ unregister_reboot_notifier(&hab_reboot_notifier);
+ pr_debug("hab exit called\n");
}
subsys_initcall(hab_init);
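The reworked open handshake above (INIT → INIT_ACK → INIT_DONE, with INIT_CANCEL on failure) now also rejects peers whose API major version differs: both sides compare the upper 16 bits of ver_fe and ver_be. A small stand-alone sketch of that major-version gate; the version values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

static int hab_major_compatible(uint32_t ver_fe, uint32_t ver_be)
{
	/* major in the upper 16 bits must match; minor may differ */
	return (ver_fe & 0xFFFF0000) == (ver_be & 0xFFFF0000);
}

int main(void)
{
	uint32_t fe = 0x00010002;  /* major 1, minor 2 (illustrative) */
	uint32_t be = 0x00010005;  /* major 1, minor 5 */

	printf("1.2 vs 1.5 compatible: %d\n", hab_major_compatible(fe, be));
	printf("1.2 vs 2.0 compatible: %d\n",
	       hab_major_compatible(fe, 0x00020000));
	return 0;
}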
diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h
index 19a8584edd35..d1aa88e3978e 100644
--- a/drivers/soc/qcom/hab/hab.h
+++ b/drivers/soc/qcom/hab/hab.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,11 +13,15 @@
#ifndef __HAB_H
#define __HAB_H
-#define pr_fmt(fmt) "|hab:%s:%d|" fmt, __func__, __LINE__
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "hab:%s:%d " fmt, __func__, __LINE__
#include <linux/types.h>
#include <linux/habmm.h>
+#include <linux/hab_ioctl.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -37,16 +41,19 @@
#include <linux/uaccess.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/reboot.h>
enum hab_payload_type {
HAB_PAYLOAD_TYPE_MSG = 0x0,
HAB_PAYLOAD_TYPE_INIT,
HAB_PAYLOAD_TYPE_INIT_ACK,
- HAB_PAYLOAD_TYPE_ACK,
+ HAB_PAYLOAD_TYPE_INIT_DONE,
HAB_PAYLOAD_TYPE_EXPORT,
HAB_PAYLOAD_TYPE_EXPORT_ACK,
HAB_PAYLOAD_TYPE_PROFILE,
HAB_PAYLOAD_TYPE_CLOSE,
+ HAB_PAYLOAD_TYPE_INIT_CANCEL,
HAB_PAYLOAD_TYPE_MAX,
};
#define LOOPBACK_DOM 0xFF
@@ -78,6 +85,8 @@ enum hab_payload_type {
#define DEVICE_QCPE4_NAME "hab_qcpe_vm4"
#define DEVICE_CLK1_NAME "hab_clock_vm1"
#define DEVICE_CLK2_NAME "hab_clock_vm2"
+#define DEVICE_FDE1_NAME "hab_fde1"
+#define DEVICE_BUFFERQ1_NAME "hab_bufferq1"
/* make sure concascaded name is less than this value */
#define MAX_VMID_NAME_SIZE 30
@@ -122,26 +131,27 @@ struct hab_header {
/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_SIZE_SHIFT 0
#define HAB_HEADER_TYPE_SHIFT 16
-#define HAB_HEADER_ID_SHIFT 24
+#define HAB_HEADER_ID_SHIFT 20
#define HAB_HEADER_SIZE_MASK 0x0000FFFF
-#define HAB_HEADER_TYPE_MASK 0x00FF0000
-#define HAB_HEADER_ID_MASK 0xFF000000
+#define HAB_HEADER_TYPE_MASK 0x000F0000
+#define HAB_HEADER_ID_MASK 0xFFF00000
#define HAB_HEADER_INITIALIZER {0}
#define HAB_MMID_GET_MAJOR(mmid) (mmid & 0xFFFF)
#define HAB_MMID_GET_MINOR(mmid) ((mmid>>16) & 0xFF)
#define HAB_VCID_ID_SHIFT 0
-#define HAB_VCID_DOMID_SHIFT 8
-#define HAB_VCID_MMID_SHIFT 16
-#define HAB_VCID_ID_MASK 0x000000FF
-#define HAB_VCID_DOMID_MASK 0x0000FF00
-#define HAB_VCID_MMID_MASK 0xFFFF0000
+#define HAB_VCID_DOMID_SHIFT 12
+#define HAB_VCID_MMID_SHIFT 20
+#define HAB_VCID_ID_MASK 0x00000FFF
+#define HAB_VCID_DOMID_MASK 0x000FF000
+#define HAB_VCID_MMID_MASK 0xFFF00000
#define HAB_VCID_GET_ID(vcid) \
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
-#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid))
+#define HAB_HEADER_SET_SESSION_ID(header, sid) \
+ ((header).session_id = (sid))
#define HAB_HEADER_SET_SIZE(header, size) \
((header).id_type_size = ((header).id_type_size & \
@@ -175,12 +185,14 @@ struct hab_header {
#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
+#define HAB_HS_TIMEOUT (10*1000*1000)
+
struct physical_channel {
+ struct list_head node;
char name[MAX_VMID_NAME_SIZE];
int is_be;
struct kref refcount;
struct hab_device *habdev;
- struct list_head node;
struct idr vchan_idr;
spinlock_t vid_lock;
@@ -188,34 +200,44 @@ struct physical_channel {
spinlock_t expid_lock;
void *hyp_data;
- int dom_id;
+ int dom_id; /* BE role: remote vmid; FE role: don't care */
+ int vmid_local; /* from DT or hab_config */
+ int vmid_remote;
+ char vmname_local[12]; /* from DT */
+ char vmname_remote[12];
int closed;
spinlock_t rxbuf_lock;
- /* vchans over this pchan */
+ /* debug only */
+ uint32_t sequence_tx;
+ uint32_t sequence_rx;
+
+ /* vchans on this pchan */
struct list_head vchannels;
+ int vcnt;
rwlock_t vchans_lock;
};
-
+/* this payload has to be used together with type */
struct hab_open_send_data {
int vchan_id;
int sub_id;
int open_id;
+ int ver_fe;
+ int ver_be;
+ int reserved;
};
struct hab_open_request {
int type;
struct physical_channel *pchan;
- int vchan_id;
- int sub_id;
- int open_id;
+ struct hab_open_send_data xdata;
};
struct hab_open_node {
struct hab_open_request request;
struct list_head node;
- int age;
+ int64_t age; /* sec */
};
struct hab_export_ack {
@@ -236,20 +258,25 @@ struct hab_message {
uint32_t data[];
};
+/* for all the pchans of same kind */
struct hab_device {
char name[MAX_VMID_NAME_SIZE];
- unsigned int id;
+ uint32_t id;
struct list_head pchannels;
int pchan_cnt;
- struct mutex pchan_lock;
- struct list_head openq_list;
+ spinlock_t pchan_lock;
+ struct list_head openq_list; /* received */
spinlock_t openlock;
wait_queue_head_t openq;
+ int openq_cnt;
};
struct uhab_context {
+ struct list_head node; /* managed by the driver */
struct kref refcount;
+
struct list_head vchannels;
+ int vcnt;
struct list_head exp_whse;
uint32_t export_total;
@@ -265,14 +292,20 @@ struct uhab_context {
void *import_ctx;
+ struct list_head pending_open; /* sent to remote */
+ int pending_cnt;
+
rwlock_t ctx_lock;
int closing;
int kernel;
+ int owner;
+
+ int lb_be; /* loopback only */
};
/*
- * array to describe the VM and its MMID configuration as what is connected to
- * so this is describing a pchan's remote side
+ * array to describe the VM and its MMID configuration as
+ * what is connected to so this is describing a pchan's remote side
*/
struct vmid_mmid_desc {
int vmid; /* remote vmid */
@@ -286,7 +319,7 @@ struct local_vmid {
};
struct hab_driver {
- struct device *dev;
+ struct device *dev; /* mmid dev list */
struct cdev cdev;
dev_t major;
struct class *class;
@@ -294,33 +327,30 @@ struct hab_driver {
struct hab_device *devp;
struct uhab_context *kctx;
+ struct list_head uctx_list;
+ int ctx_cnt;
+ spinlock_t drvlock;
+
struct local_vmid settings; /* parser results */
int b_server_dom;
- int loopback_num;
+ int b_loopback_be; /* only allow 2 apps simultaneously 1 fe 1 be */
int b_loopback;
void *hyp_priv; /* hypervisor plug-in storage */
};
struct virtual_channel {
- struct work_struct work;
/*
* refcount is used to track the references from hab core to the virtual
* channel such as references from physical channels,
* i.e. references from the "other" side
*/
struct kref refcount;
- /*
- * usagecnt is used to track the clients who are using this virtual
- * channel such as local clients, client sowftware etc,
- * i.e. references from "this" side
- */
- struct kref usagecnt;
struct physical_channel *pchan;
struct uhab_context *ctx;
- struct list_head node;
- struct list_head pnode;
+ struct list_head node; /* for ctx */
+ struct list_head pnode; /* for pchan */
struct list_head rx_list;
wait_queue_head_t rx_queue;
spinlock_t rx_lock;
@@ -328,23 +358,35 @@ struct virtual_channel {
int otherend_id;
int otherend_closed;
uint32_t session_id;
+
+ /*
+ * set when local close() is called explicitly. vchan could be
+ * used in hab-recv-msg() path (2) then close() is called (1).
+ * this is the same case as when close() is not called and
+ * there is no msg path
+ */
+ int closed;
+ int forked; /* if fork is detected and assume only once */
};
/*
- * Struct shared between local and remote, contents are composed by exporter,
- * the importer only writes to pdata and local (exporter) domID
+ * Struct shared between local and remote, contents
+ * are composed by exporter, the importer only writes
+ * to pdata and local (exporter) domID
*/
struct export_desc {
uint32_t export_id;
int readonly;
uint64_t import_index;
- struct virtual_channel *vchan;
+ struct virtual_channel *vchan; /* vchan could be freed earlier */
+ struct uhab_context *ctx;
+ struct physical_channel *pchan;
int32_t vcid_local;
int32_t vcid_remote;
int domid_local;
int domid_remote;
+ int flags;
struct list_head node;
void *kva;
@@ -353,7 +395,8 @@ struct export_desc {
} __packed;
int hab_vchan_open(struct uhab_context *ctx,
- unsigned int mmid, int32_t *vcid, uint32_t flags);
+ unsigned int mmid, int32_t *vcid,
+ int32_t timeout, uint32_t flags);
void hab_vchan_close(struct uhab_context *ctx,
int32_t vcid);
long hab_vchan_send(struct uhab_context *ctx,
@@ -361,9 +404,11 @@ long hab_vchan_send(struct uhab_context *ctx,
size_t sizebytes,
void *data,
unsigned int flags);
-struct hab_message *hab_vchan_recv(struct uhab_context *ctx,
- int vcid,
- unsigned int flags);
+int hab_vchan_recv(struct uhab_context *ctx,
+ struct hab_message **msg,
+ int vcid,
+ int *rsize,
+ unsigned int flags);
void hab_vchan_stop(struct virtual_channel *vchan);
void hab_vchans_stop(struct physical_channel *pchan);
void hab_vchan_stop_notify(struct virtual_channel *vchan);
@@ -387,39 +432,37 @@ int habmem_hyp_grant_user(unsigned long address,
int page_count,
int flags,
int remotedom,
- void *ppdata);
+ void *ppdata,
+ int *compressed,
+ int *compressed_size);
int habmem_hyp_grant(unsigned long address,
int page_count,
int flags,
int remotedom,
- void *ppdata);
+ void *ppdata,
+ int *compressed,
+ int *compressed_size);
int habmem_hyp_revoke(void *expdata, uint32_t count);
void *habmem_imp_hyp_open(void);
void habmem_imp_hyp_close(void *priv, int kernel);
-long habmem_imp_hyp_map(void *priv, void *impdata, uint32_t count,
- uint32_t remotedom,
- uint64_t *index,
- void **pkva,
- int kernel,
- uint32_t userflags);
+int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
+ struct export_desc *exp, int kernel);
-long habmm_imp_hyp_unmap(void *priv, uint64_t index,
- uint32_t count,
- int kernel);
+int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel);
int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
void hab_msg_free(struct hab_message *message);
-struct hab_message *hab_msg_dequeue(struct virtual_channel *vchan,
- int wait_flag);
+int hab_msg_dequeue(struct virtual_channel *vchan,
+ struct hab_message **msg, int *rsize, unsigned int flags);
-void hab_msg_recv(struct physical_channel *pchan,
+int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header);
void hab_open_request_init(struct hab_open_request *request,
@@ -439,7 +482,7 @@ int hab_open_listen(struct uhab_context *ctx,
int ms_timeout);
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
- struct physical_channel *pchan);
+ struct physical_channel *pchan, int openid);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);
@@ -474,6 +517,7 @@ static inline void hab_ctx_put(struct uhab_context *ctx)
void hab_send_close_msg(struct virtual_channel *vchan);
int hab_hypervisor_register(void);
void hab_hypervisor_unregister(void);
+void hab_hypervisor_unregister_common(void);
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device);
int habhyp_commdev_dealloc(void *commdev);
@@ -488,7 +532,7 @@ int physical_channel_send(struct physical_channel *pchan,
void physical_channel_rx_dispatch(unsigned long physical_channel);
-int loopback_pchan_create(char *dev_name);
+int loopback_pchan_create(struct hab_device *dev, char *pchan_name);
int hab_parse(struct local_vmid *settings);
@@ -499,6 +543,26 @@ int fill_default_gvm_settings(struct local_vmid *settings,
bool hab_is_loopback(void);
+int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
+ char *names, size_t name_size, uint32_t flags);
+
+struct hab_device *find_hab_device(unsigned int mm_id);
+
+int get_refcnt(struct kref ref);
+
+int hab_open_pending_enter(struct uhab_context *ctx,
+ struct physical_channel *pchan,
+ struct hab_open_node *pending);
+
+int hab_open_pending_exit(struct uhab_context *ctx,
+ struct physical_channel *pchan,
+ struct hab_open_node *pending);
+
+int hab_open_cancel_notify(struct hab_open_request *request);
+
+int hab_open_receive_cancel(struct physical_channel *pchan,
+ size_t sizebytes);
+
/* Global singleton HAB instance */
extern struct hab_driver hab_driver;
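The header repacks id_type_size so the payload size keeps bits 0-15, the type shrinks to bits 16-19, and the vchan id grows to bits 20-31. A quick stand-alone check of that layout using the masks and shifts defined above; the sample values are arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define HAB_HEADER_SIZE_SHIFT 0
#define HAB_HEADER_TYPE_SHIFT 16
#define HAB_HEADER_ID_SHIFT   20
#define HAB_HEADER_SIZE_MASK  0x0000FFFF
#define HAB_HEADER_TYPE_MASK  0x000F0000
#define HAB_HEADER_ID_MASK    0xFFF00000

int main(void)
{
	uint32_t size = 0x1234, type = 0x5, id = 0xABC;
	uint32_t v =
		((size << HAB_HEADER_SIZE_SHIFT) & HAB_HEADER_SIZE_MASK) |
		((type << HAB_HEADER_TYPE_SHIFT) & HAB_HEADER_TYPE_MASK) |
		((id << HAB_HEADER_ID_SHIFT) & HAB_HEADER_ID_MASK);

	/* unpack and verify each field round-trips */
	assert(((v & HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT) == size);
	assert(((v & HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT) == type);
	assert(((v & HAB_HEADER_ID_MASK) >> HAB_HEADER_ID_SHIFT) == id);
	printf("id_type_size = 0x%08X\n", v);   /* 0xABC51234 */
	return 0;
}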
diff --git a/drivers/soc/qcom/hab/hab_ghs.c b/drivers/soc/qcom/hab/hab_ghs.c
new file mode 100644
index 000000000000..859f47f3ed59
--- /dev/null
+++ b/drivers/soc/qcom/hab/hab_ghs.c
@@ -0,0 +1,217 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hab.h"
+#include "hab_ghs.h"
+
+static const char * const dt_gipc_path_name[] = {
+ "testgipc1",
+ "testgipc2",
+ "testgipc3",
+ "testgipc4",
+ "testgipc5",
+ "testgipc6",
+ "testgipc7",
+ "testgipc8",
+ "testgipc9",
+ "testgipc10",
+ "testgipc11",
+ "testgipc12",
+ "testgipc13",
+ "testgipc14",
+ "testgipc15",
+ "testgipc16",
+ "testgipc17",
+ "testgipc18",
+ "testgipc19",
+ "testgipc20",
+ "testgipc21",
+ "testgipc22",
+};
+
+static struct ghs_vmm_plugin_info_s {
+ const char **dt_name;
+ int curr;
+ int probe_cnt;
+} ghs_vmm_plugin_info = {
+ dt_gipc_path_name,
+ 0,
+ ARRAY_SIZE(dt_gipc_path_name),
+};
+
+static void ghs_irq_handler(void *cookie)
+{
+ struct physical_channel *pchan = cookie;
+ struct ghs_vdev *dev =
+ (struct ghs_vdev *) (pchan ? pchan->hyp_data : NULL);
+
+ if (dev)
+ tasklet_schedule(&dev->task);
+}
+
+/* static struct physical_channel *habhyp_commdev_alloc(int id) */
+int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
+ struct hab_device *mmid_device)
+{
+ struct ghs_vdev *dev = NULL;
+ struct physical_channel *pchan = NULL;
+ struct physical_channel **ppchan = (struct physical_channel **)commdev;
+ int ret = 0;
+
+ if (ghs_vmm_plugin_info.curr > ghs_vmm_plugin_info.probe_cnt) {
+ pr_err("too many commdev alloc %d, supported is %d\n",
+ ghs_vmm_plugin_info.curr,
+ ghs_vmm_plugin_info.probe_cnt);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ pr_err("allocate struct ghs_vdev failed %zu bytes on pchan %s\n",
+ sizeof(*dev), name);
+ goto err;
+ }
+
+ memset(dev, 0, sizeof(*dev));
+ spin_lock_init(&dev->io_lock);
+
+ /*
+ * TODO: ExtractEndpoint is in ghs_comm.c because it blocks.
+ * Extract and Request should be in roughly the same spot
+ */
+ if (is_be) {
+ /* role is backend */
+ dev->be = 1;
+ } else {
+ /* role is FE */
+ struct device_node *gvh_dn;
+
+ gvh_dn = of_find_node_by_path("/aliases");
+ if (gvh_dn) {
+ const char *ep_path = NULL;
+ struct device_node *ep_dn;
+
+ ret = of_property_read_string(gvh_dn,
+ ghs_vmm_plugin_info.dt_name[ghs_vmm_plugin_info.curr],
+ &ep_path);
+ if (ret)
+ pr_err("failed to read endpoint string ret %d\n",
+ ret);
+ of_node_put(gvh_dn);
+
+ ep_dn = of_find_node_by_path(ep_path);
+ if (ep_dn) {
+ dev->endpoint = kgipc_endpoint_alloc(ep_dn);
+ of_node_put(ep_dn);
+ if (IS_ERR(dev->endpoint)) {
+ ret = PTR_ERR(dev->endpoint);
+ pr_err("KGIPC alloc failed id: %d, ret: %d\n",
+ ghs_vmm_plugin_info.curr, ret);
+ goto err;
+ } else {
+ pr_debug("gipc ep found for %d\n",
+ ghs_vmm_plugin_info.curr);
+ }
+ } else {
+ pr_err("of_parse_phandle failed id: %d\n",
+ ghs_vmm_plugin_info.curr);
+ ret = -ENOENT;
+ goto err;
+ }
+ } else {
+ pr_err("of_find_compatible_node failed id: %d\n",
+ ghs_vmm_plugin_info.curr);
+ ret = -ENOENT;
+ goto err;
+ }
+ }
+ /* add pchan into the mmid_device list */
+ pchan = hab_pchan_alloc(mmid_device, vmid_remote);
+ if (!pchan) {
+ pr_err("hab_pchan_alloc failed for %s, cnt %d\n",
+ mmid_device->name, mmid_device->pchan_cnt);
+ ret = -ENOMEM;
+ goto err;
+ }
+ pchan->closed = 0;
+ pchan->hyp_data = (void *)dev;
+ pchan->is_be = is_be;
+ strlcpy(dev->name, name, sizeof(dev->name));
+ *ppchan = pchan;
+ dev->read_data = kmalloc(GIPC_RECV_BUFF_SIZE_BYTES, GFP_KERNEL);
+ if (!dev->read_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tasklet_init(&dev->task, physical_channel_rx_dispatch,
+ (unsigned long) pchan);
+
+ ret = kgipc_endpoint_start_with_irq_callback(dev->endpoint,
+ ghs_irq_handler,
+ pchan);
+ if (ret) {
+ pr_err("irq alloc failed id: %d %s, ret: %d\n",
+ ghs_vmm_plugin_info.curr, name, ret);
+ goto err;
+ } else
+ pr_debug("ep irq handler started for %d %s, ret %d\n",
+ ghs_vmm_plugin_info.curr, name, ret);
+ /* this value could be more than devp total */
+ ghs_vmm_plugin_info.curr++;
+ return 0;
+err:
+ hab_pchan_put(pchan);
+ kfree(dev);
+ return ret;
+}
+
+int habhyp_commdev_dealloc(void *commdev)
+{
+ struct physical_channel *pchan = (struct physical_channel *)commdev;
+ struct ghs_vdev *dev = pchan->hyp_data;
+
+ kgipc_endpoint_free(dev->endpoint);
+
+ spin_lock_destroy(&dev->io_lock);
+
+ kfree(dev->read_data);
+ kfree(dev);
+
+ if (get_refcnt(pchan->refcount) > 1) {
+ pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
+ pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
+ }
+ hab_pchan_put(pchan);
+ return 0;
+}
+
+void hab_hypervisor_unregister(void)
+{
+ pr_debug("total %d\n", hab_driver.ndevices);
+
+ hab_hypervisor_unregister_common();
+
+ ghs_vmm_plugin_info.curr = 0;
+}
+
+int hab_hypervisor_register(void)
+{
+ int ret = 0;
+
+ hab_driver.b_server_dom = 0;
+
+ return ret;
+}
diff --git a/drivers/soc/qcom/hab/hab_ghs.h b/drivers/soc/qcom/hab/hab_ghs.h
new file mode 100644
index 000000000000..54812480ebaa
--- /dev/null
+++ b/drivers/soc/qcom/hab/hab_ghs.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __HAB_GHS_H
+#define __HAB_GHS_H
+
+#include <ghs_vmm/kgipc.h>
+#define GIPC_RECV_BUFF_SIZE_BYTES (32*1024)
+
+struct ghs_vdev {
+ int be;
+ void *read_data; /* buffer to receive from gipc */
+ size_t read_size;
+ int read_offset;
+ GIPC_Endpoint endpoint;
+ spinlock_t io_lock;
+ char name[32];
+ struct tasklet_struct task;
+};
+#endif /* __HAB_GHS_H */
diff --git a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c
index ecc3f52a6662..74ee88a037af 100644
--- a/drivers/soc/qcom/hab/hab_mem_linux.c
+++ b/drivers/soc/qcom/hab/hab_mem_linux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@ struct pages_list {
uint32_t userflags;
struct file *filp_owner;
struct file *filp_mapper;
+ struct dma_buf *dmabuf;
+ int32_t export_id;
+ int32_t vcid;
};
struct importer_context {
@@ -58,7 +61,7 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
}
-static int habmem_get_dma_pages(unsigned long address,
+static int habmem_get_dma_pages_from_va(unsigned long address,
int page_count,
struct page **pages)
{
@@ -79,8 +82,6 @@ static int habmem_get_dma_pages(unsigned long address,
goto err;
}
- pr_debug("vma flags %lx\n", vma->vm_flags);
-
/* Look for the fd that matches this the vma file */
fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
if (fd == 0) {
@@ -108,7 +109,6 @@ static int habmem_get_dma_pages(unsigned long address,
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
page = sg_page(s);
- pr_debug("sgl length %d\n", s->length);
for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) {
pages[rc] = nth_page(page, j);
@@ -142,6 +142,56 @@ err:
return rc;
}
+static int habmem_get_dma_pages_from_fd(int32_t fd,
+ int page_count,
+ struct page **pages)
+{
+ struct dma_buf *dmabuf = NULL;
+ struct scatterlist *s;
+ struct sg_table *sg_table = NULL;
+ struct dma_buf_attachment *attach = NULL;
+ struct page *page;
+ int i, j, rc = 0;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ attach = dma_buf_attach(dmabuf, hab_driver.dev);
+ if (IS_ERR_OR_NULL(attach)) {
+ pr_err("dma_buf_attach failed\n");
+ goto err;
+ }
+
+ sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+
+ if (IS_ERR_OR_NULL(sg_table)) {
+ pr_err("dma_buf_map_attachment failed\n");
+ goto err;
+ }
+
+ for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
+ page = sg_page(s);
+ pr_debug("sgl length %d\n", s->length);
+
+ for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
+ pages[rc] = nth_page(page, j);
+ rc++;
+ if (WARN_ON(rc >= page_count))
+ break;
+ }
+ }
+
+err:
+ if (!IS_ERR_OR_NULL(sg_table))
+ dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
+ if (!IS_ERR_OR_NULL(attach))
+ dma_buf_detach(dmabuf, attach);
+ if (!IS_ERR_OR_NULL(dmabuf))
+ dma_buf_put(dmabuf);
+ return rc;
+}
+
/*
* exporter - grant & revoke
* degenerate sharabled page list based on CPU friendly virtual "address".
@@ -152,7 +202,9 @@ int habmem_hyp_grant_user(unsigned long address,
int page_count,
int flags,
int remotedom,
- void *ppdata)
+ void *ppdata,
+ int *compressed,
+ int *compressed_size)
{
int i, ret = 0;
struct grantable *item = (struct grantable *)ppdata;
@@ -165,7 +217,11 @@ int habmem_hyp_grant_user(unsigned long address,
down_read(&current->mm->mmap_sem);
if (HABMM_EXP_MEM_TYPE_DMA & flags) {
- ret = habmem_get_dma_pages(address,
+ ret = habmem_get_dma_pages_from_va(address,
+ page_count,
+ pages);
+ } else if (HABMM_EXPIMP_FLAGS_FD & flags) {
+ ret = habmem_get_dma_pages_from_fd(address,
page_count,
pages);
} else {
@@ -182,7 +238,8 @@ int habmem_hyp_grant_user(unsigned long address,
for (i = 0; i < page_count; i++)
item[i].pfn = page_to_pfn(pages[i]);
} else {
- pr_err("get %d user pages failed: %d\n", page_count, ret);
+ pr_err("get %d user pages failed %d flags %d\n",
+ page_count, ret, flags);
}
vfree(pages);
@@ -199,7 +256,9 @@ int habmem_hyp_grant(unsigned long address,
int page_count,
int flags,
int remotedom,
- void *ppdata)
+ void *ppdata,
+ int *compressed,
+ int *compressed_size)
{
int i;
struct grantable *item;
@@ -253,43 +312,169 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel)
list_del(&pglist->list);
priv->cnt--;
- vfree(pglist->pages);
+ kfree(pglist->pages);
kfree(pglist);
}
kfree(priv);
}
-/*
- * setup pages, be ready for the following mmap call
- * index is output to refer to this imported buffer described by the import data
- */
-long habmem_imp_hyp_map(void *imp_ctx,
- void *impdata,
- uint32_t count,
- uint32_t remotedom,
- uint64_t *index,
- void **pkva,
- int kernel,
- uint32_t userflags)
+static struct sg_table *hab_mem_map_dma_buf(
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct pages_list *pglist = dmabuf->priv;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ int i;
+ int ret = 0;
+ struct page **pages = pglist->pages;
+
+ sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_sg(sgt->sgl, sg, pglist->npages, i) {
+ sg_set_page(sg, pages[i], PAGE_SIZE, 0);
+ }
+
+ return sgt;
+}
+
+
+static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page;
+ struct pages_list *pglist;
+
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ /* PHY address */
+ unsigned long fault_offset =
+ (unsigned long)vmf->virtual_address - vma->vm_start + offset;
+ unsigned long fault_index = fault_offset>>PAGE_SHIFT;
+ int page_idx;
+
+ if (vma == NULL)
+ return VM_FAULT_SIGBUS;
+
+ pglist = vma->vm_private_data;
+
+ page_idx = fault_index - pglist->index;
+ if (page_idx < 0 || page_idx >= pglist->npages) {
+ pr_err("Out of page array! page_idx %d, pg cnt %ld",
+ page_idx, pglist->npages);
+ return VM_FAULT_SIGBUS;
+ }
+
+ page = pglist->pages[page_idx];
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static void hab_map_open(struct vm_area_struct *vma)
+{
+}
+
+static void hab_map_close(struct vm_area_struct *vma)
+{
+}
+
+static const struct vm_operations_struct habmem_vm_ops = {
+ .fault = hab_map_fault,
+ .open = hab_map_open,
+ .close = hab_map_close,
+};
+
+static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct pages_list *pglist = dmabuf->priv;
+ uint32_t obj_size = pglist->npages << PAGE_SHIFT;
+
+ if (vma == NULL)
+ return VM_FAULT_SIGBUS;
+
+ /* Check for valid size. */
+ if (obj_size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &habmem_vm_ops;
+ vma->vm_private_data = pglist;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ return 0;
+}
+
+static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
+{
+}
+
+static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
+ unsigned long offset)
+{
+ return NULL;
+}
+
+static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf,
+ unsigned long offset,
+ void *ptr)
+{
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = hab_mem_map_dma_buf,
+ .unmap_dma_buf = hab_mem_unmap_dma_buf,
+ .mmap = hab_mem_mmap,
+ .release = hab_mem_dma_buf_release,
+ .kmap_atomic = hab_mem_dma_buf_kmap,
+ .kunmap_atomic = hab_mem_dma_buf_kunmap,
+ .kmap = hab_mem_dma_buf_kmap,
+ .kunmap = hab_mem_dma_buf_kunmap,
+};
+
+static int habmem_imp_hyp_map_fd(void *imp_ctx,
+ struct export_desc *exp,
+ uint32_t userflags,
+ int32_t *pfd)
{
struct page **pages;
- struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata;
+ struct compressed_pfns *pfn_table =
+ (struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
unsigned long pfn;
int i, j, k = 0;
+ pgprot_t prot = PAGE_KERNEL;
+ int32_t fd, size;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
if (!pfn_table || !priv)
return -EINVAL;
-
- pages = vmalloc(count * sizeof(struct page *));
+ size = exp->payload_count * sizeof(struct page *);
+ pages = kmalloc(size, GFP_KERNEL);
if (!pages)
return -ENOMEM;
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
- vfree(pages);
+ kfree(pages);
return -ENOMEM;
}
@@ -303,145 +488,228 @@ long habmem_imp_hyp_map(void *imp_ctx,
}
pglist->pages = pages;
- pglist->npages = count;
- pglist->kernel = kernel;
- pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
+ pglist->npages = exp->payload_count;
+ pglist->kernel = 0;
+ pglist->index = 0;
pglist->refcntk = pglist->refcntu = 0;
pglist->userflags = userflags;
+ pglist->export_id = exp->export_id;
+ pglist->vcid = exp->vcid_remote;
+
+ if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
+ prot = pgprot_writecombine(prot);
+
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = exp->payload_count << PAGE_SHIFT;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = pglist;
+ pglist->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(pglist->dmabuf)) {
+ kfree(pages);
+ kfree(pglist);
+ return PTR_ERR(pglist->dmabuf);
+ }
- *index = pglist->index << PAGE_SHIFT;
-
- if (kernel) {
- pgprot_t prot = PAGE_KERNEL;
-
- if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
- prot = pgprot_writecombine(prot);
-
- pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
- if (pglist->kva == NULL) {
- vfree(pages);
- kfree(pglist);
- pr_err("%ld pages vmap failed\n", pglist->npages);
- return -ENOMEM;
- } else {
- pr_debug("%ld pages vmap pass, return %pK\n",
- pglist->npages, pglist->kva);
- }
-
- pglist->uva = NULL;
- pglist->refcntk++;
- *pkva = pglist->kva;
- *index = (uint64_t)((uintptr_t)pglist->kva);
- } else {
- pglist->kva = NULL;
+ fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
+ if (fd < 0) {
+ dma_buf_put(pglist->dmabuf);
+ kfree(pages);
+ kfree(pglist);
+ return -EINVAL;
}
+ pglist->refcntk++;
+
write_lock(&priv->implist_lock);
list_add_tail(&pglist->list, &priv->imp_list);
priv->cnt++;
write_unlock(&priv->implist_lock);
- pr_debug("index returned %llx\n", *index);
+
+ *pfd = fd;
return 0;
}
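
On the import side, habmem_imp_hyp_map_fd() above wraps the pfn list in a freshly exported dma-buf and returns its fd (stored in the kva field of the import parameters). A hedged user-space sketch of what an importer is then expected to do with that fd is shown below; how the fd is surfaced to user space is inferred from the param->kva assignment and may differ in the full driver.

#include <stdio.h>
#include <sys/mman.h>

/* Illustrative only: map a dma-buf fd handed back by the FD-flavoured
 * import. 'size' must not exceed the exported size, since hab_mem_mmap()
 * rejects larger requests; pages are then faulted in by hab_map_fault().
 */
static void *example_map_imported_fd(int fd, size_t size)
{
	void *va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (va == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	return va;
}
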
-/* the input index is PHY address shifted for uhab, and kva for khab */
-long habmm_imp_hyp_unmap(void *imp_ctx,
- uint64_t index,
- uint32_t count,
- int kernel)
+static int habmem_imp_hyp_map_kva(void *imp_ctx,
+ struct export_desc *exp,
+ uint32_t userflags,
+ void **pkva)
{
+ struct page **pages;
+ struct compressed_pfns *pfn_table =
+ (struct compressed_pfns *)exp->payload;
+ struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
- struct pages_list *pglist, *tmp;
- int found = 0;
- uint64_t pg_index = index >> PAGE_SHIFT;
-
- write_lock(&priv->implist_lock);
- list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
- pr_debug("node pglist %pK, kernel %d, pg_index %llx\n",
- pglist, pglist->kernel, pg_index);
+ unsigned long pfn;
+ int i, j, k = 0, size;
+ pgprot_t prot = PAGE_KERNEL;
- if (kernel) {
- if (pglist->kva == (void *)((uintptr_t)index))
- found = 1;
- } else {
- if (pglist->index == pg_index)
- found = 1;
- }
+ if (!pfn_table || !priv)
+ return -EINVAL;
+ size = exp->payload_count * sizeof(struct page *);
+ pages = kmalloc(size, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+ pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ kfree(pages);
+ return -ENOMEM;
+ }
- if (found) {
- list_del(&pglist->list);
- priv->cnt--;
- break;
+ pfn = pfn_table->first_pfn;
+ for (i = 0; i < pfn_table->nregions; i++) {
+ for (j = 0; j < pfn_table->region[i].size; j++) {
+ pages[k] = pfn_to_page(pfn+j);
+ k++;
}
+ pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
- write_unlock(&priv->implist_lock);
- if (!found) {
- pr_err("failed to find export id on index %llx\n", index);
- return -EINVAL;
+ pglist->pages = pages;
+ pglist->npages = exp->payload_count;
+ pglist->kernel = 1;
+ pglist->refcntk = pglist->refcntu = 0;
+ pglist->userflags = userflags;
+ pglist->export_id = exp->export_id;
+ pglist->vcid = exp->vcid_remote;
+
+ if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
+ prot = pgprot_writecombine(prot);
+
+ pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
+ if (pglist->kva == NULL) {
+ kfree(pages);
+ kfree(pglist);
+ pr_err("%ld pages vmap failed\n", pglist->npages);
+ return -ENOMEM;
}
- pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n",
- pglist, pglist->index, pglist->kernel, priv->cnt);
+ pr_debug("%ld pages vmap pass, return %p\n",
+ pglist->npages, pglist->kva);
- if (kernel)
- if (pglist->kva)
- vunmap(pglist->kva);
+ pglist->refcntk++;
- vfree(pglist->pages);
- kfree(pglist);
+ write_lock(&priv->implist_lock);
+ list_add_tail(&pglist->list, &priv->imp_list);
+ priv->cnt++;
+ write_unlock(&priv->implist_lock);
+
+ *pkva = pglist->kva;
return 0;
}
-static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int habmem_imp_hyp_map_uva(void *imp_ctx,
+ struct export_desc *exp,
+ uint32_t userflags,
+ uint64_t *index)
{
- struct page *page;
+ struct page **pages;
+ struct compressed_pfns *pfn_table =
+ (struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
+ struct importer_context *priv = imp_ctx;
+ unsigned long pfn;
+ int i, j, k = 0, size;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ if (!pfn_table || !priv)
+ return -EINVAL;
+ size = exp->payload_count * sizeof(struct page *);
+ pages = kmalloc(size, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
- /* PHY address */
- unsigned long fault_offset =
- (unsigned long)vmf->virtual_address - vma->vm_start + offset;
- unsigned long fault_index = fault_offset>>PAGE_SHIFT;
- int page_idx;
+ pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ kfree(pages);
+ return -ENOMEM;
+ }
- if (vma == NULL)
- return VM_FAULT_SIGBUS;
+ pfn = pfn_table->first_pfn;
+ for (i = 0; i < pfn_table->nregions; i++) {
+ for (j = 0; j < pfn_table->region[i].size; j++) {
+ pages[k] = pfn_to_page(pfn+j);
+ k++;
+ }
+ pfn += pfn_table->region[i].size + pfn_table->region[i].space;
+ }
- pglist = vma->vm_private_data;
+ pglist->pages = pages;
+ pglist->npages = exp->payload_count;
+ pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
+ pglist->refcntk = pglist->refcntu = 0;
+ pglist->userflags = userflags;
+ pglist->export_id = exp->export_id;
+ pglist->vcid = exp->vcid_remote;
- page_idx = fault_index - pglist->index;
- if (page_idx < 0 || page_idx >= pglist->npages) {
- pr_err("Out of page array. page_idx %d, pg cnt %ld",
- page_idx, pglist->npages);
- return VM_FAULT_SIGBUS;
- }
+ write_lock(&priv->implist_lock);
+ list_add_tail(&pglist->list, &priv->imp_list);
+ priv->cnt++;
+ write_unlock(&priv->implist_lock);
- pr_debug("Fault page index %d\n", page_idx);
+ *index = pglist->index << PAGE_SHIFT;
- page = pglist->pages[page_idx];
- get_page(page);
- vmf->page = page;
return 0;
}
-static void hab_map_open(struct vm_area_struct *vma)
+int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
+ struct export_desc *exp, int kernel)
{
+ int ret = 0;
+
+ if (kernel)
+ ret = habmem_imp_hyp_map_kva(imp_ctx, exp,
+ param->flags,
+ (void **)&param->kva);
+ else if (param->flags & HABMM_EXPIMP_FLAGS_FD)
+ ret = habmem_imp_hyp_map_fd(imp_ctx, exp,
+ param->flags,
+ (int32_t *)&param->kva);
+ else
+ ret = habmem_imp_hyp_map_uva(imp_ctx, exp,
+ param->flags,
+ &param->index);
+
+ return ret;
}
-static void hab_map_close(struct vm_area_struct *vma)
+int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel)
{
-}
+ struct importer_context *priv = imp_ctx;
+ struct pages_list *pglist, *tmp;
+ int found = 0;
-static const struct vm_operations_struct habmem_vm_ops = {
+ write_lock(&priv->implist_lock);
+ list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
+ if (pglist->export_id == exp->export_id &&
+ pglist->vcid == exp->vcid_remote) {
+ found = 1;
+ list_del(&pglist->list);
+ priv->cnt--;
+ break;
+ }
+ }
+ write_unlock(&priv->implist_lock);
- .fault = hab_map_fault,
- .open = hab_map_open,
- .close = hab_map_close,
-};
+ if (!found) {
+ pr_err("failed to find export id %u\n", exp->export_id);
+ return -EINVAL;
+ }
+
+ pr_debug("detach pglist %p, kernel %d, list cnt %d\n",
+ pglist, pglist->kernel, priv->cnt);
+
+ if (pglist->kva)
+ vunmap(pglist->kva);
+
+ if (pglist->dmabuf)
+ dma_buf_put(pglist->dmabuf);
+
+ kfree(pglist->pages);
+ kfree(pglist);
+
+ return 0;
+}
int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
{
@@ -451,9 +719,6 @@ int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
struct pages_list *pglist;
int bfound = 0;
- pr_debug("mmap request start %lX, len %ld, index %lX\n",
- vma->vm_start, length, vma->vm_pgoff);
-
read_lock(&imp_ctx->implist_lock);
list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
if (pglist->index == vma->vm_pgoff) {
diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c
index 67601590908e..86d763f65657 100644
--- a/drivers/soc/qcom/hab/hab_mimex.c
+++ b/drivers/soc/qcom/hab/hab_mimex.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,7 +28,7 @@
static int hab_export_ack_find(struct uhab_context *ctx,
- struct hab_export_ack *expect_ack)
+ struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret = 0;
struct hab_export_ack_recvd *ack_recvd, *tmp;
@@ -36,9 +36,10 @@ static int hab_export_ack_find(struct uhab_context *ctx,
spin_lock_bh(&ctx->expq_lock);
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
- if (ack_recvd->ack.export_id == expect_ack->export_id &&
+ if ((ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
- ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
+ ack_recvd->ack.vcid_remote == expect_ack->vcid_remote)
+ || vchan->otherend_closed) {
list_del(&ack_recvd->node);
kfree(ack_recvd);
ret = 1;
@@ -57,15 +58,17 @@ static int hab_export_ack_find(struct uhab_context *ctx,
}
static int hab_export_ack_wait(struct uhab_context *ctx,
- struct hab_export_ack *expect_ack)
+ struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret;
ret = wait_event_interruptible_timeout(ctx->exp_wq,
- hab_export_ack_find(ctx, expect_ack),
- HZ);
+ hab_export_ack_find(ctx, expect_ack, vchan),
+ HAB_HS_TIMEOUT);
if (!ret || (ret == -ERESTARTSYS))
ret = -EAGAIN;
+ else if (vchan->otherend_closed)
+ ret = -ENODEV;
else if (ret > 0)
ret = 0;
return ret;
@@ -86,7 +89,7 @@ static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
if (!vchan || !sizebytes)
return NULL;
- exp = vmalloc(sizebytes);
+ exp = kzalloc(sizebytes, GFP_KERNEL);
if (!exp)
return NULL;
@@ -103,6 +106,8 @@ static struct export_desc *habmem_add_export(struct virtual_channel *vchan,
exp->vcid_remote = vchan->otherend_id;
exp->domid_local = -1; /* dom id, provided on the importer */
exp->domid_remote = vchan->pchan->dom_id;
+ exp->ctx = vchan->ctx;
+ exp->pchan = vchan->pchan;
ctx = vchan->ctx;
write_lock(&ctx->exp_lock);
@@ -118,19 +123,22 @@ void habmem_remove_export(struct export_desc *exp)
struct physical_channel *pchan;
struct uhab_context *ctx;
- if (!exp || !exp->vchan || !exp->vchan->ctx || !exp->vchan->pchan)
+ if (!exp || !exp->ctx || !exp->pchan) {
+ pr_err("failed to find valid info in exp %pK ctx %pK pchan %pK\n",
+ exp, exp ? exp->ctx : NULL, exp ? exp->pchan : NULL);
return;
+ }
- ctx = exp->vchan->ctx;
+ ctx = exp->ctx;
ctx->export_total--;
- pchan = exp->vchan->pchan;
+ pchan = exp->pchan;
spin_lock(&pchan->expid_lock);
idr_remove(&pchan->expid_idr, exp->export_id);
spin_unlock(&pchan->expid_lock);
- vfree(exp);
+ kfree(exp);
}
static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
@@ -148,7 +156,7 @@ static int compress_pfns(void **pfns, int npages, unsigned int *data_size)
new_table->first_pfn = item[0].pfn;
for (i = 1; i < npages; i++) {
if (item[i].pfn-1 == item[i-1].pfn) {
- region_size++;
+ region_size++; /* contiguous pfn */
} else {
new_table->region[j].size = region_size;
new_table->region[j].space = item[i].pfn -
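
As a quick illustration of the compressed_pfns layout this function produces and the importer loops below consume, here is a worked example with invented pfn values; struct example_region stands in for the real region array entry and is not the driver's type.

/* Input pfns: 100, 101, 102, 110, 111  (illustration only) */
struct example_region { int size; int space; };

static const unsigned long example_first_pfn = 100;
static const struct example_region example_regions[] = {
	{ .size = 3, .space = 7 },	/* pfns 100..102, then a 7-pfn gap */
	{ .size = 2, .space = 0 },	/* pfns 110..111; last space unused */
};
/* The importer rebuilds the page list exactly as habmem_imp_hyp_map_kva()
 * does: start at first_pfn, take 'size' pages per region, then advance by
 * size + space (100 + 3 + 7 = 110 for the second region).
 */
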
@@ -208,7 +216,12 @@ static int habmem_export_vchan(struct uhab_context *ctx,
expected_ack.export_id = exp->export_id;
expected_ack.vcid_local = exp->vcid_local;
expected_ack.vcid_remote = exp->vcid_remote;
- ret = hab_export_ack_wait(ctx, &expected_ack);
+ ret = hab_export_ack_wait(ctx, &expected_ack, vchan);
+ if (ret != 0) {
+ pr_err("failed to receive remote export ack %d on vc %x\n",
+ ret, vchan->id);
+ return ret;
+ }
*export_id = exp->export_id;
@@ -225,12 +238,11 @@ int hab_mem_export(struct uhab_context *ctx,
uint32_t export_id = 0;
struct virtual_channel *vchan;
int page_count;
+ int compressed = 0;
- if (!ctx || !param || param->sizebytes > HAB_MAX_EXPORT_SIZE)
+ if (!ctx || !param)
return -EINVAL;
- pr_debug("vc %X, mem size %d\n", param->vcid, param->sizebytes);
-
vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
@@ -249,13 +261,17 @@ int hab_mem_export(struct uhab_context *ctx,
page_count,
param->flags,
vchan->pchan->dom_id,
- pdata_exp);
+ pdata_exp,
+ &compressed,
+ &pdata_size);
} else {
ret = habmem_hyp_grant_user((unsigned long)param->buffer,
page_count,
param->flags,
vchan->pchan->dom_id,
- pdata_exp);
+ pdata_exp,
+ &compressed,
+ &pdata_size);
}
if (ret < 0) {
pr_err("habmem_hyp_grant failed size=%d ret=%d\n",
@@ -263,7 +279,8 @@ int hab_mem_export(struct uhab_context *ctx,
goto err;
}
- compress_pfns(&pdata_exp, page_count, &pdata_size);
+ if (!compressed)
+ compress_pfns(&pdata_exp, page_count, &pdata_size);
ret = habmem_export_vchan(ctx,
vchan,
@@ -287,14 +304,23 @@ int hab_mem_unexport(struct uhab_context *ctx,
{
int ret = 0, found = 0;
struct export_desc *exp, *tmp;
+ struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
+ /* refcnt on the access */
+ vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
+ if (!vchan || !vchan->pchan) {
+ ret = -ENODEV;
+ goto err_novchan;
+ }
+
write_lock(&ctx->exp_lock);
list_for_each_entry_safe(exp, tmp, &ctx->exp_whse, node) {
- if ((param->exportid == exp->export_id) &&
- (param->vcid == exp->vcid_local)) {
+ if (param->exportid == exp->export_id &&
+ param->vcid == exp->vcid_local) {
+ /* same vchan guarantees the pchan for idr */
list_del(&exp->node);
found = 1;
break;
@@ -302,15 +328,22 @@ int hab_mem_unexport(struct uhab_context *ctx,
}
write_unlock(&ctx->exp_lock);
- if (!found)
- return -EINVAL;
+ if (!found) {
+ ret = -EINVAL;
+ goto err_novchan;
+ }
ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
if (ret) {
pr_err("Error found in revoke grant with ret %d", ret);
- return ret;
+ goto err_novchan;
}
habmem_remove_export(exp);
+
+err_novchan:
+ if (vchan)
+ hab_vchan_put(vchan);
+
return ret;
}
@@ -320,14 +353,24 @@ int hab_mem_import(struct uhab_context *ctx,
{
int ret = 0, found = 0;
struct export_desc *exp = NULL;
+ struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
+ vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
+ if (!vchan || !vchan->pchan) {
+ ret = -ENODEV;
+ goto err_imp;
+ }
+
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if ((exp->export_id == param->exportid) &&
(param->vcid == exp->vcid_remote)) {
+ /* only allow import on the vchan received
+ * from the remote side
+ */
found = 1;
break;
}
@@ -338,32 +381,24 @@ int hab_mem_import(struct uhab_context *ctx,
pr_err("Fail to get export descriptor from export id %d\n",
param->exportid);
ret = -ENODEV;
- return ret;
+ goto err_imp;
}
- pr_debug("call map id: %d pcnt %d remote_dom %d 1st_ref:0x%X\n",
- exp->export_id, exp->payload_count, exp->domid_local,
- *((uint32_t *)exp->payload));
-
- ret = habmem_imp_hyp_map(ctx->import_ctx,
- exp->payload,
- exp->payload_count,
- exp->domid_local,
- &exp->import_index,
- &exp->kva,
- kernel,
- param->flags);
+ ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
+
if (ret) {
pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
ret, exp->payload_count,
exp->domid_local, *((uint32_t *)exp->payload));
- return ret;
+ goto err_imp;
}
- pr_debug("import index %llx, kva %llx, kernel %d\n",
- exp->import_index, param->kva, kernel);
- param->index = exp->import_index;
- param->kva = (uint64_t)exp->kva;
+ exp->import_index = param->index;
+ exp->kva = kernel ? (void *)param->kva : NULL;
+
+err_imp:
+ if (vchan)
+ hab_vchan_put(vchan);
return ret;
}
@@ -374,20 +409,26 @@ int hab_mem_unimport(struct uhab_context *ctx,
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *exp_tmp;
+ struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
+ vchan = hab_get_vchan_fromvcid(param->vcid, ctx);
+ if (!vchan || !vchan->pchan) {
+ if (vchan)
+ hab_vchan_put(vchan);
+ return -ENODEV;
+ }
+
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
- if ((exp->export_id == param->exportid) &&
- (param->vcid == exp->vcid_remote)) {
+ if (exp->export_id == param->exportid &&
+ param->vcid == exp->vcid_remote) {
+ /* same vchan is expected here */
list_del(&exp->node);
ctx->import_total--;
found = 1;
-
- pr_debug("found id:%d payload cnt:%d kernel:%d\n",
- exp->export_id, exp->payload_count, kernel);
break;
}
}
@@ -396,17 +437,17 @@ int hab_mem_unimport(struct uhab_context *ctx,
if (!found)
ret = -EINVAL;
else {
- ret = habmm_imp_hyp_unmap(ctx->import_ctx,
- exp->import_index,
- exp->payload_count,
- kernel);
+ ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, kernel);
if (ret) {
- pr_err("unmap fail id:%d pcnt:%d kernel:%d\n",
- exp->export_id, exp->payload_count, kernel);
+ pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
+ exp->export_id, exp->payload_count, exp->vcid_remote);
}
param->kva = (uint64_t)exp->kva;
kfree(exp);
}
+ if (vchan)
+ hab_vchan_put(vchan);
+
return ret;
}
diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c
index 700239a25652..9d5ee134c94e 100644
--- a/drivers/soc/qcom/hab/hab_msg.c
+++ b/drivers/soc/qcom/hab/hab_msg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,29 +42,54 @@ void hab_msg_free(struct hab_message *message)
kfree(message);
}
-struct hab_message *
-hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag)
+int
+hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
+ int *rsize, unsigned int flags)
{
struct hab_message *message = NULL;
int ret = 0;
-
- if (wait_flag) {
- if (hab_rx_queue_empty(vchan))
- ret = wait_event_interruptible(vchan->rx_queue,
- !hab_rx_queue_empty(vchan) ||
- vchan->otherend_closed);
+ int wait = !(flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
+ int interruptible = !(flags & HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
+
+ if (wait) {
+ if (hab_rx_queue_empty(vchan)) {
+ if (interruptible)
+ ret = wait_event_interruptible(vchan->rx_queue,
+ !hab_rx_queue_empty(vchan) ||
+ vchan->otherend_closed);
+ else
+ wait_event(vchan->rx_queue,
+ !hab_rx_queue_empty(vchan) ||
+ vchan->otherend_closed);
+ }
}
/* return all the received messages before the remote close */
- if (!ret && !hab_rx_queue_empty(vchan)) {
+ if ((!ret || (ret == -ERESTARTSYS)) && !hab_rx_queue_empty(vchan)) {
spin_lock_bh(&vchan->rx_lock);
message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
- list_del(&message->node);
+ if (message) {
+ if (*rsize >= message->sizebytes) {
+ /* msg can be safely retrieved in full */
+ list_del(&message->node);
+ ret = 0;
+ *rsize = message->sizebytes;
+ } else {
+ pr_err("rcv buffer too small %d < %zd\n",
+ *rsize, message->sizebytes);
+ *rsize = message->sizebytes;
+ message = NULL;
+ ret = -EOVERFLOW; /* caller may retry with a larger buffer */
+ }
+ }
spin_unlock_bh(&vchan->rx_lock);
- }
+ } else
+ /* no message received, retain the original status */
+ *rsize = 0;
- return message;
+ *msg = message;
+ return ret;
}
static void hab_msg_queue(struct virtual_channel *vchan,
@@ -74,7 +99,7 @@ static void hab_msg_queue(struct virtual_channel *vchan,
list_add_tail(&message->node, &vchan->rx_list);
spin_unlock_bh(&vchan->rx_lock);
- wake_up_interruptible(&vchan->rx_queue);
+ wake_up(&vchan->rx_queue);
}
static int hab_export_enqueue(struct virtual_channel *vchan,
@@ -118,7 +143,7 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
return -ENOMEM;
if (sizeof(ack_recvd->ack) != sizebytes)
- pr_err("exp ack size %lu is not as arrived %zu\n",
+ pr_err("exp ack size %zu is not as arrived %zu\n",
sizeof(ack_recvd->ack), sizebytes);
if (physical_channel_read(pchan,
@@ -126,11 +151,6 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
sizebytes) != sizebytes)
return -EIO;
- pr_debug("receive export id %d, local vc %X, vd remote %X\n",
- ack_recvd->ack.export_id,
- ack_recvd->ack.vcid_local,
- ack_recvd->ack.vcid_remote);
-
spin_lock_bh(&ctx->expq_lock);
list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
spin_unlock_bh(&ctx->expq_lock);
@@ -138,10 +158,21 @@ static int hab_receive_create_export_ack(struct physical_channel *pchan,
return 0;
}
-void hab_msg_recv(struct physical_channel *pchan,
+static void hab_msg_drop(struct physical_channel *pchan, size_t sizebytes)
+{
+ uint8_t *data = NULL;
+
+ data = kmalloc(sizebytes, GFP_ATOMIC);
+ if (data == NULL)
+ return;
+ physical_channel_read(pchan, data, sizebytes);
+ kfree(data);
+}
+
+int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header)
{
- int ret;
+ int ret = 0;
struct hab_message *message;
struct hab_device *dev = pchan->habdev;
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
@@ -155,7 +186,8 @@ void hab_msg_recv(struct physical_channel *pchan,
/* get the local virtual channel if it isn't an open message */
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
- payload_type != HAB_PAYLOAD_TYPE_ACK) {
+ payload_type != HAB_PAYLOAD_TYPE_INIT_DONE &&
+ payload_type != HAB_PAYLOAD_TYPE_INIT_CANCEL) {
/* sanity check the received message */
if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
@@ -165,29 +197,42 @@ void hab_msg_recv(struct physical_channel *pchan,
payload_type, vchan_id, sizebytes, session_id);
}
+ /*
+ * both vcid and session_id are needed to locate the vchan;
+ * the lookup is done from the pchan instead of the ctx
+ */
vchan = hab_vchan_get(pchan, header);
if (!vchan) {
- pr_debug("vchan is not found, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ pr_info("vchan is not found, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
- if (sizebytes)
- pr_err("message is dropped\n");
-
- return;
+ if (sizebytes) {
+ hab_msg_drop(pchan, sizebytes);
+ pr_err("message %d dropped no vchan, session id %d\n",
+ payload_type, session_id);
+ }
+ return -EINVAL;
} else if (vchan->otherend_closed) {
hab_vchan_put(vchan);
- pr_debug("vchan remote is closed, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ pr_info("vchan remote is closed payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
-
- if (sizebytes)
- pr_err("message is dropped\n");
-
- return;
+ if (sizebytes) {
+ hab_msg_drop(pchan, sizebytes);
+ pr_err("message %d dropped remote close, session id %d\n",
+ payload_type, session_id);
+ }
+ return -ENODEV;
}
} else {
if (sizebytes != sizeof(struct hab_open_send_data)) {
- pr_err("Invalid open request received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ pr_err("Invalid open request received type %d, vcid %x, szbytes %zx, session %d\n",
payload_type, vchan_id, sizebytes, session_id);
+ if (sizebytes) {
+ hab_msg_drop(pchan, sizebytes);
+ pr_err("message %d dropped unknown reason, session id %d\n",
+ payload_type, session_id);
+ }
+ return -ENODEV;
}
}
@@ -202,7 +247,7 @@ void hab_msg_recv(struct physical_channel *pchan,
case HAB_PAYLOAD_TYPE_INIT:
case HAB_PAYLOAD_TYPE_INIT_ACK:
- case HAB_PAYLOAD_TYPE_ACK:
+ case HAB_PAYLOAD_TYPE_INIT_DONE:
ret = hab_open_request_add(pchan, sizebytes, payload_type);
if (ret) {
pr_err("open request add failed, ret %d, payload type %d, sizebytes %zx\n",
@@ -212,6 +257,16 @@ void hab_msg_recv(struct physical_channel *pchan,
wake_up_interruptible(&dev->openq);
break;
+ case HAB_PAYLOAD_TYPE_INIT_CANCEL:
+ pr_info("remote open cancel header vcid %X session %d local %d remote %d\n",
+ vchan_id, session_id, pchan->vmid_local,
+ pchan->vmid_remote);
+ ret = hab_open_receive_cancel(pchan, sizebytes);
+ if (ret)
+ pr_err("open cancel handling failed ret %d vcid %X session %d\n",
+ ret, vchan_id, session_id);
+ break;
+
case HAB_PAYLOAD_TYPE_EXPORT:
exp_desc = kzalloc(sizebytes, GFP_ATOMIC);
if (!exp_desc)
@@ -219,7 +274,10 @@ void hab_msg_recv(struct physical_channel *pchan,
if (physical_channel_read(pchan, exp_desc, sizebytes) !=
sizebytes) {
- vfree(exp_desc);
+ pr_err("corrupted exp expect %zd bytes vcid %X remote %X open %d!\n",
+ sizebytes, vchan->id,
+ vchan->otherend_id, vchan->session_id);
+ kfree(exp_desc);
break;
}
@@ -241,36 +299,33 @@ void hab_msg_recv(struct physical_channel *pchan,
case HAB_PAYLOAD_TYPE_CLOSE:
/* remote request close */
- pr_debug("remote side request close\n");
- pr_debug(" vchan id %X, other end %X, session %d\n",
- vchan->id, vchan->otherend_id, session_id);
+ pr_info("remote request close vcid %pK %X other id %X session %d refcnt %d\n",
+ vchan, vchan->id, vchan->otherend_id,
+ session_id, get_refcnt(vchan->refcount));
hab_vchan_stop(vchan);
break;
case HAB_PAYLOAD_TYPE_PROFILE:
do_gettimeofday(&tv);
-
/* pull down the incoming data */
message = hab_msg_alloc(pchan, sizebytes);
- if (!message) {
- pr_err("msg alloc failed\n");
- break;
+ if (!message)
+ pr_err("failed to allocate msg Arrived msg will be lost\n");
+ else {
+ struct habmm_xing_vm_stat *pstat =
+ (struct habmm_xing_vm_stat *)message->data;
+ pstat->rx_sec = tv.tv_sec;
+ pstat->rx_usec = tv.tv_usec;
+ hab_msg_queue(vchan, message);
}
-
- ((uint64_t *)message->data)[2] = tv.tv_sec;
- ((uint64_t *)message->data)[3] = tv.tv_usec;
- hab_msg_queue(vchan, message);
break;
default:
- pr_err("unknown msg is received\n");
- pr_err("payload type %d, vchan id %x\n",
- payload_type, vchan_id);
- pr_err("sizebytes %zx, session %d\n",
- sizebytes, session_id);
-
+ pr_err("unknown msg received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
+ payload_type, vchan_id, sizebytes, session_id);
break;
}
if (vchan)
hab_vchan_put(vchan);
+ return ret;
}
diff --git a/drivers/soc/qcom/hab/hab_open.c b/drivers/soc/qcom/hab/hab_open.c
index 35f3281604e2..50ebdf6852fe 100644
--- a/drivers/soc/qcom/hab/hab_open.c
+++ b/drivers/soc/qcom/hab/hab_open.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,8 @@
*/
#include "hab.h"
+#define HAB_OPEN_REQ_EXPIRE_TIME_S (3600*10)
+
void hab_open_request_init(struct hab_open_request *request,
int type,
struct physical_channel *pchan,
@@ -21,57 +23,55 @@ void hab_open_request_init(struct hab_open_request *request,
{
request->type = type;
request->pchan = pchan;
- request->vchan_id = vchan_id;
- request->sub_id = sub_id;
- request->open_id = open_id;
+ request->xdata.vchan_id = vchan_id;
+ request->xdata.sub_id = sub_id;
+ request->xdata.open_id = open_id;
}
int hab_open_request_send(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
- struct hab_open_send_data data;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, request->type);
- data.vchan_id = request->vchan_id;
- data.open_id = request->open_id;
- data.sub_id = request->sub_id;
-
- return physical_channel_send(request->pchan, &header, &data);
+ return physical_channel_send(request->pchan, &header, &request->xdata);
}
+/* called when remote sends in open-request */
int hab_open_request_add(struct physical_channel *pchan,
size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_device *dev = pchan->habdev;
- struct hab_open_send_data data;
struct hab_open_request *request;
+ struct timeval tv;
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
- if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
+ request = &node->request;
+ if (physical_channel_read(pchan, &request->xdata, sizebytes)
+ != sizebytes)
return -EIO;
- request = &node->request;
- request->type = request_type;
- request->pchan = pchan;
- request->vchan_id = data.vchan_id;
- request->sub_id = data.sub_id;
- request->open_id = data.open_id;
- node->age = 0;
+ request->type = request_type;
+ request->pchan = pchan;
+
+ do_gettimeofday(&tv);
+ node->age = tv.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
+ tv.tv_usec/1000000;
hab_pchan_get(pchan);
spin_lock_bh(&dev->openlock);
list_add_tail(&node->node, &dev->openq_list);
+ dev->openq_cnt++;
spin_unlock_bh(&dev->openlock);
-
return 0;
}
+/* local only */
static int hab_open_request_find(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
@@ -79,6 +79,7 @@ static int hab_open_request_find(struct uhab_context *ctx,
{
struct hab_open_node *node, *tmp;
struct hab_open_request *request;
+ struct timeval tv;
int ret = 0;
if (ctx->closing ||
@@ -91,21 +92,27 @@ static int hab_open_request_find(struct uhab_context *ctx,
if (list_empty(&dev->openq_list))
goto done;
+ do_gettimeofday(&tv);
+
list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
request = (struct hab_open_request *)node;
- if (request->type == listen->type &&
- (request->sub_id == listen->sub_id) &&
- (!listen->open_id ||
- request->open_id == listen->open_id) &&
+ if ((request->type == listen->type ||
+ request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) &&
+ (request->xdata.sub_id == listen->xdata.sub_id) &&
+ (!listen->xdata.open_id ||
+ request->xdata.open_id == listen->xdata.open_id) &&
(!listen->pchan ||
request->pchan == listen->pchan)) {
list_del(&node->node);
+ dev->openq_cnt--;
*recv_request = request;
ret = 1;
break;
}
- node->age++;
- if (node->age > Q_AGE_THRESHOLD) {
+ if (node->age < (int64_t)tv.tv_sec + tv.tv_usec/1000000) {
+ pr_warn("open request type %d sub %d open %d\n",
+ request->type, request->xdata.sub_id,
+ request->xdata.sub_id);
list_del(&node->node);
hab_open_request_free(request);
}
@@ -121,7 +128,8 @@ void hab_open_request_free(struct hab_open_request *request)
if (request) {
hab_pchan_put(request->pchan);
kfree(request);
- }
+ } else
+ pr_err("empty request found\n");
}
int hab_open_listen(struct uhab_context *ctx,
@@ -132,22 +140,153 @@ int hab_open_listen(struct uhab_context *ctx,
{
int ret = 0;
- if (!ctx || !listen || !recv_request)
+ if (!ctx || !listen || !recv_request) {
+ pr_err("listen failed ctx %pK listen %pK request %pK\n",
+ ctx, listen, recv_request);
return -EINVAL;
+ }
*recv_request = NULL;
- if (ms_timeout > 0) {
+ if (ms_timeout > 0) { /* be case */
+ ms_timeout = msecs_to_jiffies(ms_timeout);
ret = wait_event_interruptible_timeout(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request),
ms_timeout);
- if (!ret || (-ERESTARTSYS == ret))
- ret = -EAGAIN;
- else if (ret > 0)
- ret = 0;
- } else {
+ if (!ret || (-ERESTARTSYS == ret)) {
+ pr_warn("something failed in open listen ret %d\n",
+ ret);
+ ret = -EAGAIN; /* condition not met */
+ } else if (ret > 0)
+ ret = 0; /* condition met */
+ } else { /* fe case */
ret = wait_event_interruptible(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request));
+ if (ctx->closing) {
+ pr_warn("local closing during open ret %d\n", ret);
+ ret = -ENODEV;
+ } else if (-ERESTARTSYS == ret) {
+ pr_warn("local interrupted during open ret %d\n", ret);
+ ret = -EAGAIN;
+ }
+ }
+
+ return ret;
+}
+
+/* called when receives remote's cancel init from FE or init-ack from BE */
+int hab_open_receive_cancel(struct physical_channel *pchan,
+ size_t sizebytes)
+{
+ struct hab_device *dev = pchan->habdev;
+ struct hab_open_send_data data;
+ struct hab_open_request *request;
+ struct hab_open_node *node, *tmp;
+ int bfound = 0;
+ struct timeval tv;
+
+ if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
+ return -EIO;
+
+ spin_lock_bh(&dev->openlock);
+ list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
+ request = &node->request;
+ /* check if open request has been serviced or not */
+ if ((request->type == HAB_PAYLOAD_TYPE_INIT ||
+ request->type == HAB_PAYLOAD_TYPE_INIT_ACK) &&
+ (request->xdata.sub_id == data.sub_id) &&
+ (request->xdata.open_id == data.open_id) &&
+ (request->xdata.vchan_id == data.vchan_id)) {
+ list_del(&node->node);
+ dev->openq_cnt--;
+ pr_info("open cancelled on pchan %s vcid %x subid %d openid %d\n",
+ pchan->name, data.vchan_id,
+ data.sub_id, data.open_id);
+ /* found un-serviced open request, delete it */
+ bfound = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&dev->openlock);
+
+ if (!bfound) {
+ pr_info("init waiting is in-flight. vcid %x sub %d open %d\n",
+ data.vchan_id, data.sub_id, data.open_id);
+ /* add cancel to the openq to let the waiting open bail out */
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node)
+ return -ENOMEM;
+
+ request = &node->request;
+ request->type = HAB_PAYLOAD_TYPE_INIT_CANCEL;
+ request->pchan = pchan;
+ request->xdata.vchan_id = data.vchan_id;
+ request->xdata.sub_id = data.sub_id;
+ request->xdata.open_id = data.open_id;
+
+ do_gettimeofday(&tv);
+ node->age = tv.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
+ tv.tv_usec/1000000;
+ /* put when this node is handled in open path */
+ hab_pchan_get(pchan);
+
+ spin_lock_bh(&dev->openlock);
+ list_add_tail(&node->node, &dev->openq_list);
+ dev->openq_cnt++;
+ spin_unlock_bh(&dev->openlock);
+
+ wake_up_interruptible(&dev->openq);
+ }
+
+ return 0;
+}
+
+/* calls locally to send cancel pending open to remote */
+int hab_open_cancel_notify(struct hab_open_request *request)
+{
+ struct hab_header header = HAB_HEADER_INITIALIZER;
+
+ HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
+ HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_INIT_CANCEL);
+
+ return physical_channel_send(request->pchan, &header, &request->xdata);
+}
+
+int hab_open_pending_enter(struct uhab_context *ctx,
+ struct physical_channel *pchan,
+ struct hab_open_node *pending)
+{
+ write_lock(&ctx->ctx_lock);
+ list_add_tail(&pending->node, &ctx->pending_open);
+ ctx->pending_cnt++;
+ write_unlock(&ctx->ctx_lock);
+
+ return 0;
+}
+
+int hab_open_pending_exit(struct uhab_context *ctx,
+ struct physical_channel *pchan,
+ struct hab_open_node *pending)
+{
+ struct hab_open_node *node, *tmp;
+ int ret = -ENOENT;
+
+ write_lock(&ctx->ctx_lock);
+ list_for_each_entry_safe(node, tmp, &ctx->pending_open, node) {
+ if ((node->request.type == pending->request.type) &&
+ (node->request.pchan
+ == pending->request.pchan) &&
+ (node->request.xdata.vchan_id
+ == pending->request.xdata.vchan_id) &&
+ (node->request.xdata.sub_id
+ == pending->request.xdata.sub_id) &&
+ (node->request.xdata.open_id
+ == pending->request.xdata.open_id)) {
+ list_del(&node->node);
+ ctx->pending_cnt--;
+ ret = 0;
+ }
}
+ write_unlock(&ctx->ctx_lock);
return ret;
}
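
The pending-open helpers above are meant to bracket an open attempt so that a late or abandoned request can be cancelled cleanly. The sketch below shows one plausible opener-side sequence; the real ordering lives in hab_vchan_open(), which is outside this excerpt, so treat the flow (and the example_ function itself) as illustrative rather than as the driver's actual code.

#include "hab.h"

/* Illustrative opener-side flow only; hab_vchan_open() may differ. */
static int example_open_with_cancel(struct uhab_context *ctx,
				    struct physical_channel *pchan,
				    struct hab_open_node *pending)
{
	struct hab_open_request *req = &pending->request;
	int ret;

	/* track the outstanding request so a late INIT_ACK or INIT_CANCEL
	 * can still be matched against it
	 */
	hab_open_pending_enter(ctx, pchan, pending);

	ret = hab_open_request_send(req);
	if (!ret) {
		/* ... block in hab_open_listen() until the matching
		 * INIT_ACK, or a queued INIT_CANCEL, arrives ...
		 */
	}

	if (ret)
		/* ask the remote side to drop the half-open request */
		hab_open_cancel_notify(req);

	hab_open_pending_exit(ctx, pchan, pending);
	return ret;
}
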
diff --git a/drivers/soc/qcom/hab/hab_parser.c b/drivers/soc/qcom/hab/hab_parser.c
index da0a4a3830a7..c332587e2b47 100644
--- a/drivers/soc/qcom/hab/hab_parser.c
+++ b/drivers/soc/qcom/hab/hab_parser.c
@@ -30,7 +30,7 @@ static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
for (j = mmid_start; j < mmid_start + mmid_range; j++) {
/* sanity check */
if (tbl[i].mmid[j] != HABCFG_VMID_INVALID) {
- pr_err("overwrite previous setting, i %d, j %d, be %d\n",
+ pr_err("overwrite previous setting vmid %d, mmid %d, be %d\n",
i, j, tbl[i].is_listener[j]);
}
tbl[i].mmid[j] = j;
@@ -43,28 +43,23 @@ static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
void dump_settings(struct local_vmid *settings)
{
- int i, j;
-
pr_debug("self vmid is %d\n", settings->self);
- for (i = 0; i < HABCFG_VMID_MAX; i++) {
- pr_debug("remote vmid %d\n",
- settings->vmid_mmid_list[i].vmid);
- for (j = 0; j <= HABCFG_MMID_AREA_MAX; j++) {
- pr_debug("mmid %d, is_be %d\n",
- settings->vmid_mmid_list[i].mmid[j],
- settings->vmid_mmid_list[i].is_listener[j]);
- }
- }
}
int fill_default_gvm_settings(struct local_vmid *settings, int vmid_local,
- int mmid_start, int mmid_end) {
+ int mmid_start, int mmid_end)
+{
+ int32_t be = HABCFG_BE_FALSE;
+ int32_t range = 1;
+ int32_t vmremote = 0; /* default to host[0] as local is guest[2] */
+
settings->self = vmid_local;
/* default gvm always talks to host as vm0 */
- return fill_vmid_mmid_tbl(settings->vmid_mmid_list, 0, 1,
- mmid_start/100, (mmid_end-mmid_start)/100+1, HABCFG_BE_FALSE);
+ return fill_vmid_mmid_tbl(settings->vmid_mmid_list, vmremote, range,
+ mmid_start/100, (mmid_end-mmid_start)/100+1, be);
}
+/* device tree based parser */
static int hab_parse_dt(struct local_vmid *settings)
{
int result, i;
@@ -151,6 +146,10 @@ static int hab_parse_dt(struct local_vmid *settings)
return 0;
}
+/*
+ * 0: successful
+ * negative: various failure codes
+ */
int hab_parse(struct local_vmid *settings)
{
int ret;
diff --git a/drivers/soc/qcom/hab/hab_pchan.c b/drivers/soc/qcom/hab/hab_pchan.c
index 36bc29b7bd0c..8a9a6dfd1e0c 100644
--- a/drivers/soc/qcom/hab/hab_pchan.c
+++ b/drivers/soc/qcom/hab/hab_pchan.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,10 +35,10 @@ hab_pchan_alloc(struct hab_device *habdev, int otherend_id)
rwlock_init(&pchan->vchans_lock);
spin_lock_init(&pchan->rxbuf_lock);
- mutex_lock(&habdev->pchan_lock);
+ spin_lock_bh(&habdev->pchan_lock);
list_add_tail(&pchan->node, &habdev->pchannels);
habdev->pchan_cnt++;
- mutex_unlock(&habdev->pchan_lock);
+ spin_unlock_bh(&habdev->pchan_lock);
return pchan;
}
@@ -47,11 +47,26 @@ static void hab_pchan_free(struct kref *ref)
{
struct physical_channel *pchan =
container_of(ref, struct physical_channel, refcount);
+ struct virtual_channel *vchan;
- mutex_lock(&pchan->habdev->pchan_lock);
+ pr_debug("pchan %s refcnt %d\n", pchan->name,
+ get_refcnt(pchan->refcount));
+
+ spin_lock_bh(&pchan->habdev->pchan_lock);
list_del(&pchan->node);
pchan->habdev->pchan_cnt--;
- mutex_unlock(&pchan->habdev->pchan_lock);
+ spin_unlock_bh(&pchan->habdev->pchan_lock);
+
+ /* check vchan leaking */
+ read_lock(&pchan->vchans_lock);
+ list_for_each_entry(vchan, &pchan->vchannels, pnode) {
+ /* no logging on the owner. it might have been gone */
+ pr_warn("leaking vchan id %X remote %X refcnt %d\n",
+ vchan->id, vchan->otherend_id,
+ get_refcnt(vchan->refcount));
+ }
+ read_unlock(&pchan->vchans_lock);
+
kfree(pchan->hyp_data);
kfree(pchan);
}
@@ -61,7 +76,7 @@ hab_pchan_find_domid(struct hab_device *dev, int dom_id)
{
struct physical_channel *pchan;
- mutex_lock(&dev->pchan_lock);
+ spin_lock_bh(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node)
if (pchan->dom_id == dom_id || dom_id == HABCFG_VMID_DONT_CARE)
break;
@@ -75,7 +90,7 @@ hab_pchan_find_domid(struct hab_device *dev, int dom_id)
if (pchan && !kref_get_unless_zero(&pchan->refcount))
pchan = NULL;
- mutex_unlock(&dev->pchan_lock);
+ spin_unlock_bh(&dev->pchan_lock);
return pchan;
}
diff --git a/drivers/soc/qcom/hab/hab_qvm.c b/drivers/soc/qcom/hab/hab_qvm.c
index 280eb3148337..129d1deeb2f0 100644
--- a/drivers/soc/qcom/hab/hab_qvm.c
+++ b/drivers/soc/qcom/hab/hab_qvm.c
@@ -52,7 +52,8 @@ static struct shmem_irq_config pchan_factory_settings[] = {
{0x1b011000, 24},
{0x1b012000, 25},
{0x1b013000, 26},
-
+ {0x1b014000, 27},
+ {0x1b015000, 28},
};
static struct qvm_plugin_info {
@@ -70,14 +71,14 @@ static struct qvm_plugin_info {
static irqreturn_t shm_irq_handler(int irq, void *_pchan)
{
irqreturn_t rc = IRQ_NONE;
- struct physical_channel *pchan = _pchan;
+ struct physical_channel *pchan = (struct physical_channel *) _pchan;
struct qvm_channel *dev =
(struct qvm_channel *) (pchan ? pchan->hyp_data : NULL);
if (dev && dev->guest_ctrl) {
int status = dev->guest_ctrl->status;
- if (status & dev->idx) {
+ if (status & 0xffff) {/*source bitmask indicator*/
rc = IRQ_HANDLED;
tasklet_schedule(&dev->task);
}
@@ -94,13 +95,14 @@ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
int i;
pr_debug("name = %s, factory paddr = 0x%lx, irq %d, pages %d\n",
- name, factory_addr, irq, pages);
+ name, factory_addr, irq, pages);
dev->guest_factory = (struct guest_shm_factory *)factory_addr;
if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) {
pr_err("signature error: %ld != %llu, factory addr %lx\n",
GUEST_SHM_SIGNATURE, dev->guest_factory->signature,
factory_addr);
+ iounmap(dev->guest_factory);
return 0;
}
@@ -119,6 +121,7 @@ static uint64_t get_guest_factory_paddr(struct qvm_channel *dev,
/* See if we successfully created/attached to the region. */
if (dev->guest_factory->status != GSS_OK) {
pr_err("create failed: %d\n", dev->guest_factory->status);
+ iounmap(dev->guest_factory);
return 0;
}
@@ -179,6 +182,7 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
{
struct qvm_channel *dev = NULL;
struct qvm_plugin_info *qvm_priv = hab_driver.hyp_priv;
+ uint64_t paddr;
struct physical_channel **pchan = (struct physical_channel **)commdev;
int ret = 0, coid = 0, channel = 0;
char *shmdata;
@@ -186,7 +190,6 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE);
uint32_t pipe_alloc_pages =
(pipe_alloc_size + PAGE_SIZE - 1) / PAGE_SIZE;
- uint64_t paddr;
int temp;
int total_pages;
struct page **pages;
@@ -195,8 +198,10 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
pipe_alloc_size);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
spin_lock_init(&dev->io_lock);
@@ -207,7 +212,7 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
pipe_alloc_pages);
qvm_priv->curr++;
if (qvm_priv->curr > qvm_priv->probe_cnt) {
- pr_err("factory setting %d overflow probed cnt %d\n",
+ pr_err("pchan guest factory setting %d overflow probed cnt %d\n",
qvm_priv->curr, qvm_priv->probe_cnt);
ret = -1;
goto err;
@@ -260,17 +265,18 @@ int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
dev->coid = coid;
ret = create_dispatcher(*pchan);
- if (ret)
+ if (ret < 0)
goto err;
return ret;
err:
+ pr_err("habhyp_commdev_alloc failed\n");
+
kfree(dev);
if (*pchan)
hab_pchan_put(*pchan);
- pr_err("habhyp_commdev_alloc failed: %d\n", ret);
return ret;
}
@@ -279,6 +285,13 @@ int habhyp_commdev_dealloc(void *commdev)
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct qvm_channel *dev = pchan->hyp_data;
+ dev->guest_ctrl->detach = 0;
+
+ if (get_refcnt(pchan->refcount) > 1) {
+ pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
+ pchan->name, pchan->vcnt,
+ get_refcnt(pchan->refcount));
+ }
kfree(dev);
hab_pchan_put(pchan);
@@ -301,25 +314,13 @@ int hab_hypervisor_register(void)
void hab_hypervisor_unregister(void)
{
- int status, i;
-
- for (i = 0; i < hab_driver.ndevices; i++) {
- struct hab_device *dev = &hab_driver.devp[i];
- struct physical_channel *pchan;
-
- list_for_each_entry(pchan, &dev->pchannels, node) {
- status = habhyp_commdev_dealloc(pchan);
- if (status) {
- pr_err("failed to free pchan %pK, i %d, ret %d\n",
- pchan, i, status);
- }
- }
- }
+ hab_hypervisor_unregister_common();
qvm_priv_info.probe_cnt = 0;
qvm_priv_info.curr = 0;
}
+/* this happens before hypervisor register */
static int hab_shmem_probe(struct platform_device *pdev)
{
int irq = 0;
@@ -372,19 +373,6 @@ static int hab_shmem_remove(struct platform_device *pdev)
static void hab_shmem_shutdown(struct platform_device *pdev)
{
- int i;
- struct qvm_channel *dev;
- struct physical_channel *pchan;
- struct hab_device *hab_dev;
-
- for (i = 0; i < hab_driver.ndevices; i++) {
- hab_dev = &hab_driver.devp[i];
- pr_debug("detaching %s\n", hab_dev->name);
- list_for_each_entry(pchan, &hab_dev->pchannels, node) {
- dev = (struct qvm_channel *)pchan->hyp_data;
- dev->guest_ctrl->detach = 0;
- }
- }
}
static const struct of_device_id hab_shmem_match_table[] = {
diff --git a/drivers/soc/qcom/hab/hab_qvm.h b/drivers/soc/qcom/hab/hab_qvm.h
index b483f4c21331..fe7cb0bbda0a 100644
--- a/drivers/soc/qcom/hab/hab_qvm.h
+++ b/drivers/soc/qcom/hab/hab_qvm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,7 @@ struct qvm_channel {
int channel;
int coid;
+ /* Guest VM */
unsigned int guest_intr;
unsigned int guest_iid;
unsigned int factory_addr;
diff --git a/drivers/soc/qcom/hab/hab_vchan.c b/drivers/soc/qcom/hab/hab_vchan.c
index 91ae173f7e83..d127bcca19f8 100644
--- a/drivers/soc/qcom/hab/hab_vchan.c
+++ b/drivers/soc/qcom/hab/hab_vchan.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,8 @@
#include "hab.h"
struct virtual_channel *
-hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
+hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan,
+ int openid)
{
int id;
struct virtual_channel *vchan;
@@ -28,11 +29,13 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
/* This should be the first thing we do in this function */
idr_preload(GFP_KERNEL);
spin_lock_bh(&pchan->vid_lock);
- id = idr_alloc(&pchan->vchan_idr, vchan, 1, 256, GFP_NOWAIT);
+ id = idr_alloc(&pchan->vchan_idr, vchan, 1,
+ (HAB_VCID_ID_MASK >> HAB_VCID_ID_SHIFT) + 1, GFP_NOWAIT);
spin_unlock_bh(&pchan->vid_lock);
idr_preload_end();
- if (id < 0) {
+ if (id <= 0) {
+ pr_err("idr failed %d\n", id);
kfree(vchan);
return NULL;
}
@@ -40,8 +43,11 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
hab_pchan_get(pchan);
vchan->pchan = pchan;
+ /* vchan needs both vcid and openid to be properly located */
+ vchan->session_id = openid;
write_lock(&pchan->vchans_lock);
list_add_tail(&vchan->pnode, &pchan->vchannels);
+ pchan->vcnt++;
write_unlock(&pchan->vchans_lock);
vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
@@ -53,7 +59,7 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
init_waitqueue_head(&vchan->rx_queue);
kref_init(&vchan->refcount);
- kref_init(&vchan->usagecnt);
+
vchan->otherend_closed = pchan->closed;
hab_ctx_get(ctx);
@@ -65,11 +71,9 @@ hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan)
static void
hab_vchan_free(struct kref *ref)
{
- int found;
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
- struct export_desc *exp, *exp_tmp;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
struct virtual_channel *vc, *vc_tmp;
@@ -81,76 +85,84 @@ hab_vchan_free(struct kref *ref)
}
spin_unlock_bh(&vchan->rx_lock);
- do {
- found = 0;
- write_lock(&ctx->exp_lock);
- list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
- if (exp->vcid_local == vchan->id) {
- list_del(&exp->node);
- found = 1;
- break;
- }
- }
- write_unlock(&ctx->exp_lock);
- if (found) {
- habmem_hyp_revoke(exp->payload, exp->payload_count);
- habmem_remove_export(exp);
- }
- } while (found);
-
- do {
- found = 0;
- spin_lock_bh(&ctx->imp_lock);
- list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
- if (exp->vcid_remote == vchan->id) {
- list_del(&exp->node);
- found = 1;
- break;
- }
- }
- spin_unlock_bh(&ctx->imp_lock);
- if (found) {
- habmm_imp_hyp_unmap(ctx->import_ctx,
- exp->import_index,
- exp->payload_count,
- ctx->kernel);
- ctx->import_total--;
- kfree(exp);
- }
- } while (found);
-
- spin_lock_bh(&pchan->vid_lock);
- idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
- spin_unlock_bh(&pchan->vid_lock);
+ /* releasing this vchan from the ctx was done earlier in vchan close() */
+ hab_ctx_put(ctx); /* now ctx is not needed from this vchan's view */
+ vchan->ctx = NULL;
+ /* release vchan from pchan. no more msg for this vchan */
write_lock(&pchan->vchans_lock);
list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
if (vchan == vc) {
list_del(&vc->pnode);
+ /* the ref is held in case of pchan is freed */
+ pchan->vcnt--;
break;
}
}
write_unlock(&pchan->vchans_lock);
- hab_pchan_put(pchan);
- hab_ctx_put(ctx);
+ /* release idr at the last so same idr will not be used early */
+ spin_lock_bh(&pchan->vid_lock);
+ idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
+ spin_unlock_bh(&pchan->vid_lock);
+
+ hab_pchan_put(pchan); /* no more need for pchan from this vchan */
kfree(vchan);
}
+/*
+ * only for msg recv path to retrieve vchan from vcid and openid based on
+ * pchan's vchan list
+ */
struct virtual_channel*
hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
+ size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
+ uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
spin_lock_bh(&pchan->vid_lock);
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
- if (vchan)
- if ((vchan->session_id != session_id) ||
- (!kref_get_unless_zero(&vchan->refcount)))
+ if (vchan) {
+ if (vchan->session_id != session_id)
+ /*
+ * skipped if the session is different, even when
+ * the vcid is the same
+ */
vchan = NULL;
+ else if (!vchan->otherend_id /*&& !vchan->session_id*/) {
+ /*
+ * an unpaired vchan can be fetched right after it is
+ * alloc'ed, so it has to be skipped when searching
+ * for a remote msg
+ */
+ pr_warn("vcid %x is not paired yet session %d refcnt %d type %d sz %zd\n",
+ vchan->id, vchan->otherend_id,
+ get_refcnt(vchan->refcount),
+ payload_type, sizebytes);
+ vchan = NULL;
+ } else if (!kref_get_unless_zero(&vchan->refcount)) {
+ /*
+ * this happens when refcnt is already zero
+ * (put from other thread) or there is an actual error
+ */
+ pr_err("failed to inc vcid %pK %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
+ vchan, vchan->id, vchan->otherend_id,
+ vchan->session_id, get_refcnt(vchan->refcount),
+ vchan_id, session_id, payload_type, sizebytes);
+ vchan = NULL;
+ } else if (vchan->otherend_closed || vchan->closed) {
+ pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
+ vchan->otherend_closed, vchan->closed,
+ vchan->id, vchan->otherend_id,
+ vchan->session_id, get_refcnt(vchan->refcount),
+ vchan_id, session_id, payload_type, sizebytes);
+ vchan = NULL;
+ }
+ }
spin_unlock_bh(&pchan->vid_lock);
return vchan;
@@ -160,7 +172,8 @@ void hab_vchan_stop(struct virtual_channel *vchan)
{
if (vchan) {
vchan->otherend_closed = 1;
- wake_up_interruptible(&vchan->rx_queue);
+ wake_up(&vchan->rx_queue);
+ wake_up_interruptible(&vchan->ctx->exp_wq);
}
}
@@ -187,23 +200,36 @@ int hab_vchan_find_domid(struct virtual_channel *vchan)
return vchan ? vchan->pchan->dom_id : -1;
}
-static void
-hab_vchan_free_deferred(struct work_struct *work)
+/* this should only be called once, after the refcnt drops to zero */
+static void hab_vchan_schedule_free(struct kref *ref)
{
- struct virtual_channel *vchan =
- container_of(work, struct virtual_channel, work);
+ struct virtual_channel *vchanin =
+ container_of(ref, struct virtual_channel, refcount);
+ struct uhab_context *ctx = vchanin->ctx;
+ struct virtual_channel *vchan, *tmp;
+ int bnotify = 0;
- hab_vchan_free(&vchan->refcount);
-}
+ /*
+ * similar logic is in ctx free. if ctx free runs first,
+ * this is skipped
+ */
+ write_lock(&ctx->ctx_lock);
+ list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
+ if (vchan == vchanin) {
+ pr_debug("vchan free refcnt = %d\n",
+ get_refcnt(vchan->refcount));
+ ctx->vcnt--;
+ list_del(&vchan->node);
+ bnotify = 1;
+ break;
+ }
+ }
+ write_unlock(&ctx->ctx_lock);
-static void
-hab_vchan_schedule_free(struct kref *ref)
-{
- struct virtual_channel *vchan =
- container_of(ref, struct virtual_channel, refcount);
+ if (bnotify)
+ hab_vchan_stop_notify(vchan);
- INIT_WORK(&vchan->work, hab_vchan_free_deferred);
- schedule_work(&vchan->work);
+ hab_vchan_free(ref);
}
void hab_vchan_put(struct virtual_channel *vchan)
@@ -211,3 +237,25 @@ void hab_vchan_put(struct virtual_channel *vchan)
if (vchan)
kref_put(&vchan->refcount, hab_vchan_schedule_free);
}
+
+int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
+ char *names, size_t name_size, uint32_t flags)
+{
+ struct virtual_channel *vchan = hab_get_vchan_fromvcid(vcid, ctx);
+
+ if (!vchan)
+ return -EINVAL;
+
+ if (vchan->otherend_closed) {
+ hab_vchan_put(vchan);
+ return -ENODEV;
+ }
+
+ *ids = vchan->pchan->vmid_local |
+ ((uint64_t)vchan->pchan->vmid_remote) << 32;
+ names[0] = 0;
+ names[name_size/2] = 0;
+
+ hab_vchan_put(vchan);
+
+ return 0;
+}
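For reference, hab_vchan_query() packs the local vmid into the low 32 bits of *ids and the remote vmid into the high 32 bits; the habmm_socket_query() wrapper in khab.c below relies on that layout. A minimal decode sketch under that assumption (illustrative only, not part of the patch):

    /* illustrative helper: unpack the 64-bit vmid pair produced above */
    static inline void hab_vmids_unpack(uint64_t ids,
                                        uint32_t *vmid_local,
                                        uint32_t *vmid_remote)
    {
        *vmid_local  = (uint32_t)(ids & 0xFFFFFFFF);
        *vmid_remote = (uint32_t)(ids >> 32);
    }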
diff --git a/drivers/soc/qcom/hab/khab.c b/drivers/soc/qcom/hab/khab.c
index 05e6aa2fa7ca..c4acf12fd553 100644
--- a/drivers/soc/qcom/hab/khab.c
+++ b/drivers/soc/qcom/hab/khab.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,13 +10,14 @@
* GNU General Public License for more details.
*
*/
-#include <linux/module.h>
#include "hab.h"
+#include <linux/module.h>
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
uint32_t timeout, uint32_t flags)
{
- return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle, flags);
+ return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle,
+ timeout, flags);
}
EXPORT_SYMBOL(habmm_socket_open);
@@ -51,22 +52,17 @@ int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
if (!size_bytes || !dst_buff)
return -EINVAL;
- msg = hab_vchan_recv(hab_driver.kctx, handle, flags);
-
- if (IS_ERR(msg)) {
- *size_bytes = 0;
- return PTR_ERR(msg);
- }
+ ret = hab_vchan_recv(hab_driver.kctx, &msg, handle, size_bytes, flags);
- if (*size_bytes < msg->sizebytes) {
- *size_bytes = 0;
- ret = -EINVAL;
- } else {
+ if (ret == 0 && msg)
memcpy(dst_buff, msg->data, msg->sizebytes);
- *size_bytes = msg->sizebytes;
- }
+ else if (ret && msg)
+ pr_warn("vcid %X recv failed %d but msg is still received %zd bytes\n",
+ handle, ret, msg->sizebytes);
+
+ if (msg)
+ hab_msg_free(msg);
- hab_msg_free(msg);
return ret;
}
EXPORT_SYMBOL(habmm_socket_recv);
@@ -138,3 +134,24 @@ int32_t habmm_unimport(int32_t handle,
return hab_mem_unimport(hab_driver.kctx, &param, 1);
}
EXPORT_SYMBOL(habmm_unimport);
+
+int32_t habmm_socket_query(int32_t handle,
+ struct hab_socket_info *info,
+ uint32_t flags)
+{
+ int ret;
+ uint64_t ids;
+ char nm[sizeof(info->vmname_remote) + sizeof(info->vmname_local)];
+
+ ret = hab_vchan_query(hab_driver.kctx, handle, &ids, nm, sizeof(nm), 1);
+ if (!ret) {
+ info->vmid_local = ids & 0xFFFFFFFF;
+ info->vmid_remote = (ids & 0xFFFFFFFF00000000UL) >> 32;
+
+ strlcpy(info->vmname_local, nm, sizeof(info->vmname_local));
+ strlcpy(info->vmname_remote, &nm[sizeof(info->vmname_local)],
+ sizeof(info->vmname_remote));
+ }
+ return ret;
+}
+EXPORT_SYMBOL(habmm_socket_query);
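For context, a sketch of how a kernel client might use the new query call; habmm_socket_close() and the chosen MM id are assumptions from the surrounding habmm API, only the habmm_socket_open()/habmm_socket_query() signatures and the struct hab_socket_info field names come from this patch:

    /* hypothetical caller, not part of the patch */
    static int example_query_peer(uint32_t mmid)
    {
        struct hab_socket_info info;
        int32_t handle;
        int ret;

        ret = habmm_socket_open(&handle, mmid, 0, 0);
        if (ret)
            return ret;

        ret = habmm_socket_query(handle, &info, 0);
        if (!ret)
            pr_info("local vm %d (%s) <-> remote vm %d (%s)\n",
                    info.vmid_local, info.vmname_local,
                    info.vmid_remote, info.vmname_remote);

        habmm_socket_close(handle);
        return ret;
    }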
diff --git a/drivers/soc/qcom/hab/khab_test.c b/drivers/soc/qcom/hab/khab_test.c
new file mode 100644
index 000000000000..3773211aeec7
--- /dev/null
+++ b/drivers/soc/qcom/hab/khab_test.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "hab.h"
+#include "khab_test.h"
+#include "hab_pipe.h"
+#include "hab_qvm.h"
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+
+static char g_perf_test_result[256];
+
+enum hab_perf_test_type {
+ HAB_SHMM_THGPUT = 0x0,
+};
+
+#define HAB_PERF_TEST_MMID 802
+#define PERF_TEST_ITERATION 50
+#define MEM_READ_ITERATION 30
+
+static int hab_shmm_throughput_test(void)
+{
+ struct hab_device *habDev;
+ struct qvm_channel *dev;
+ struct hab_shared_buf *sh_buf;
+ struct physical_channel *pchan;
+ struct timeval tv1, tv2;
+ int i, counter;
+ void *test_data;
+ unsigned char *source_data, *shmm_adr;
+
+ register int sum;
+ register int *pp, *lastone;
+ int throughput[3][2] = {0};
+ int latency[6][PERF_TEST_ITERATION];
+ int ret = 0, tmp, size;
+
+ habDev = find_hab_device(HAB_PERF_TEST_MMID);
+ if (!habDev || list_empty(&(habDev->pchannels))) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ pchan = list_first_entry(&(habDev->pchannels),
+ struct physical_channel, node);
+ dev = pchan->hyp_data;
+ if (!dev) {
+ ret = -EPERM;
+ return ret;
+ }
+
+ sh_buf = dev->pipe_ep->tx_info.sh_buf;
+ /* the pchannel is 128k; use 64k for the test */
+ size = 0x10000;
+
+ if (!sh_buf) {
+ pr_err("Share buffer address is empty, exit the perf test\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ shmm_adr = sh_buf->data;
+
+ test_data = kzalloc(size, GFP_ATOMIC);
+ if (!test_data) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ source_data = kzalloc(size, GFP_ATOMIC);
+ if (!source_data) {
+ kfree(test_data);
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ for (i = 0; i < PERF_TEST_ITERATION; i++) {
+ /* Normal memory copy latency */
+ flush_cache_all();
+ do_gettimeofday(&tv1);
+ memcpy(test_data, source_data, size);
+ do_gettimeofday(&tv2);
+ latency[0][i] = (tv2.tv_sec - tv1.tv_sec)*1000000
+ + (tv2.tv_usec - tv1.tv_usec);
+
+ /* Shared memory copy latency */
+ flush_cache_all();
+ do_gettimeofday(&tv1);
+ memcpy(shmm_adr, source_data, size);
+ do_gettimeofday(&tv2);
+ latency[1][i] = (tv2.tv_sec - tv1.tv_sec)*1000000
+ + (tv2.tv_usec - tv1.tv_usec);
+
+ /* Normal memory read latency */
+ counter = MEM_READ_ITERATION;
+ sum = 0;
+ latency[2][i] = 0;
+ flush_cache_all();
+ while (counter-- > 0) {
+ pp = test_data;
+ lastone = (int *)((char *)test_data + size - 512);
+ do_gettimeofday(&tv1);
+ while (pp <= lastone) {
+ sum +=
+ pp[0] + pp[4] + pp[8] + pp[12]
+ + pp[16] + pp[20] + pp[24] + pp[28]
+ + pp[32] + pp[36] + pp[40] + pp[44]
+ + pp[48] + pp[52] + pp[56] + pp[60]
+ + pp[64] + pp[68] + pp[72] + pp[76]
+ + pp[80] + pp[84] + pp[88] + pp[92]
+ + pp[96] + pp[100] + pp[104]
+ + pp[108] + pp[112]
+ + pp[116] + pp[120]
+ + pp[124];
+ pp += 128;
+ }
+ do_gettimeofday(&tv2);
+ latency[2][i] += (tv2.tv_sec - tv1.tv_sec)*1000000
+ + (tv2.tv_usec - tv1.tv_usec);
+ flush_cache_all();
+ }
+
+ /* Shared memory read latency */
+ counter = MEM_READ_ITERATION;
+ sum = 0;
+ latency[3][i] = 0;
+ while (counter-- > 0) {
+ pp = (int *)shmm_adr;
+ lastone = (int *)(shmm_adr + size - 512);
+ do_gettimeofday(&tv1);
+ while (pp <= lastone) {
+ sum +=
+ pp[0] + pp[4] + pp[8] + pp[12]
+ + pp[16] + pp[20] + pp[24] + pp[28]
+ + pp[32] + pp[36] + pp[40] + pp[44]
+ + pp[48] + pp[52] + pp[56] + pp[60]
+ + pp[64] + pp[68] + pp[72] + pp[76]
+ + pp[80] + pp[84] + pp[88] + pp[92]
+ + pp[96] + pp[100] + pp[104]
+ + pp[108] + pp[112]
+ + pp[116] + pp[120]
+ + pp[124];
+ pp += 128;
+ }
+ do_gettimeofday(&tv2);
+ latency[3][i] += (tv2.tv_sec - tv1.tv_sec)*1000000
+ + (tv2.tv_usec - tv1.tv_usec);
+ flush_cache_all();
+ }
+
+ /* Normal memory write latency */
+ flush_cache_all();
+ do_gettimeofday(&tv1);
+ memset(test_data, 'c', size);
+ do_gettimeofday(&tv2);
+ latency[4][i] = (tv2.tv_sec - tv1.tv_sec)*1000000
+ + (tv2.tv_usec - tv1.tv_usec);
+
+ /* Shared memory write latency */
+ flush_cache_all();
+ do_gettimeofday(&tv1);
+ memset(shmm_adr, 'c', size);
+ do_gettimeofday(&tv2);
+ latency[5][i] = (tv2.tv_sec - tv1.tv_sec)*1000000
+ + (tv2.tv_usec - tv1.tv_usec);
+ }
+
+ /* Calculate normal memory copy throughput by average */
+ tmp = 0;
+ for (i = 0; i < PERF_TEST_ITERATION; i++)
+ tmp += latency[0][i];
+ throughput[0][0] = (tmp != 0) ? size*PERF_TEST_ITERATION/tmp : 0;
+
+ /* Calculate shared memory copy throughput by average */
+ tmp = 0;
+ for (i = 0; i < PERF_TEST_ITERATION; i++)
+ tmp += latency[1][i];
+ throughput[0][1] = (tmp != 0) ? size*PERF_TEST_ITERATION/tmp : 0;
+
+ /* Calculate normal memory read throughput by average */
+ tmp = 0;
+ for (i = 0; i < PERF_TEST_ITERATION; i++)
+ tmp += latency[2][i];
+ throughput[1][0] = (tmp != 0) ?
+ size*PERF_TEST_ITERATION*MEM_READ_ITERATION/tmp : 0;
+
+ /* Calculate shared memory read throughput by average */
+ tmp = 0;
+ for (i = 0; i < PERF_TEST_ITERATION; i++)
+ tmp += latency[3][i];
+ throughput[1][1] = (tmp != 0) ?
+ size*PERF_TEST_ITERATION*MEM_READ_ITERATION/tmp : 0;
+
+ /* Calculate normal memory write throughput by average */
+ tmp = 0;
+ for (i = 0; i < PERF_TEST_ITERATION; i++)
+ tmp += latency[4][i];
+ throughput[2][0] = (tmp != 0) ?
+ size*PERF_TEST_ITERATION/tmp : 0;
+
+ /* Calculate shared memory write throughput by average */
+ tmp = 0;
+ for (i = 0; i < PERF_TEST_ITERATION; i++)
+ tmp += latency[5][i];
+ throughput[2][1] = (tmp != 0) ?
+ size*PERF_TEST_ITERATION/tmp : 0;
+
+ kfree(test_data);
+ kfree(source_data);
+
+ snprintf(g_perf_test_result, sizeof(g_perf_test_result),
+ "cpy(%d,%d)/read(%d,%d)/write(%d,%d)",
+ throughput[0][0], throughput[0][1], throughput[1][0],
+ throughput[1][1], throughput[2][0], throughput[2][1]);
+
+ return ret;
+}
+
+int hab_perf_test(long testId)
+{
+ int ret;
+
+ switch (testId) {
+ case HAB_SHMM_THGPUT:
+ ret = hab_shmm_throughput_test();
+ break;
+ default:
+ pr_err("Invalid performance test ID %ld\n", testId);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int kick_hab_perf_test(const char *val, struct kernel_param *kp);
+static int get_hab_perf_result(char *buffer, struct kernel_param *kp);
+
+module_param_call(perf_test, kick_hab_perf_test, get_hab_perf_result,
+ NULL, S_IRUSR | S_IWUSR);
+
+static int kick_hab_perf_test(const char *val, struct kernel_param *kp)
+{
+ long testId;
+ int err = kstrtol(val, 10, &testId);
+
+ if (err)
+ return err;
+ memset(g_perf_test_result, 0, sizeof(g_perf_test_result));
+ return hab_perf_test(testId);
+}
+
+static int get_hab_perf_result(char *buffer, struct kernel_param *kp)
+{
+ return strlcpy(buffer, g_perf_test_result,
+ strlen(g_perf_test_result)+1);
+}
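For reference, the test is kicked by writing a test id (0 for HAB_SHMM_THGPUT) to the perf_test module parameter and the result string is read back from the same parameter. Each latency sample above is in microseconds, so the reported throughput values are bytes per microsecond (roughly MB/s): e.g. 64 KiB copied in an average of 16 us gives 65536*50/800 = 4096. A minimal sketch of the same averaging, assuming a filled latency row:

    /* illustrative: average throughput in bytes/us over one latency row */
    static int avg_throughput(const int *lat_us, int iterations, int bytes)
    {
        int i, total_us = 0;

        for (i = 0; i < iterations; i++)
            total_us += lat_us[i];

        /* total bytes moved divided by total microseconds spent */
        return total_us ? (bytes * iterations) / total_us : 0;
    }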
diff --git a/drivers/soc/qcom/hab/khab_test.h b/drivers/soc/qcom/hab/khab_test.h
new file mode 100644
index 000000000000..bc2080ed08c7
--- /dev/null
+++ b/drivers/soc/qcom/hab/khab_test.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KHAB_TEST_H
+#define __KHAB_TEST_H
+
+int hab_perf_test(long testId);
+
+#endif /* __KHAB_TEST_H */
diff --git a/drivers/soc/qcom/hab/qvm_comm.c b/drivers/soc/qcom/hab/qvm_comm.c
index 41e34be9ac21..04381232b26a 100644
--- a/drivers/soc/qcom/hab/qvm_comm.c
+++ b/drivers/soc/qcom/hab/qvm_comm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -43,7 +43,6 @@ int physical_channel_send(struct physical_channel *pchan,
int sizebytes = HAB_HEADER_GET_SIZE(*header);
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int total_size = sizeof(*header) + sizebytes;
- struct timeval tv;
if (total_size > dev->pipe_ep->tx_info.sh_buf->size)
return -EINVAL; /* too much data for ring */
@@ -67,9 +66,13 @@ int physical_channel_send(struct physical_channel *pchan,
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
+ struct timeval tv;
+ struct habmm_xing_vm_stat *pstat =
+ (struct habmm_xing_vm_stat *)payload;
+
do_gettimeofday(&tv);
- ((uint64_t *)payload)[0] = tv.tv_sec;
- ((uint64_t *)payload)[1] = tv.tv_usec;
+ pstat->tx_sec = tv.tv_sec;
+ pstat->tx_usec = tv.tv_usec;
}
if (sizebytes) {
@@ -102,7 +105,7 @@ void physical_channel_rx_dispatch(unsigned long data)
break; /* no data available */
if (header.signature != HAB_HEAD_SIGNATURE) {
- pr_err("HAB signature mismatch, expect %X, received %X, id_type_size %X, session %X, sequence %X\n",
+ pr_err("HAB signature mismatch expect %X received %X, id_type_size %X session %X sequence %X\n",
HAB_HEAD_SIGNATURE, header.signature,
header.id_type_size,
header.session_id,
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 848328a1a4ea..4dfb533b724a 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1181,8 +1181,9 @@ bool icnss_is_fw_down(void)
{
if (!penv)
return false;
- else
- return test_bit(ICNSS_FW_DOWN, &penv->state);
+
+ return test_bit(ICNSS_FW_DOWN, &penv->state) ||
+ test_bit(ICNSS_PD_RESTART, &penv->state);
}
EXPORT_SYMBOL(icnss_is_fw_down);
diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c
index adf4078818a5..104053406ea9 100644
--- a/drivers/soc/qcom/ipc_router_mhi_xprt.c
+++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -424,7 +424,7 @@ static int ipc_router_mhi_write(void *data,
struct rr_packet *pkt = (struct rr_packet *)data;
struct sk_buff *ipc_rtr_pkt;
struct rr_packet *cloned_pkt;
- int rc;
+ int rc = 0;
struct ipc_router_mhi_xprt *mhi_xprtp =
container_of(xprt, struct ipc_router_mhi_xprt, xprt);
@@ -948,7 +948,7 @@ error:
*/
static int ipc_router_mhi_xprt_probe(struct platform_device *pdev)
{
- int rc;
+ int rc = -ENODEV;
struct ipc_router_mhi_xprt_config mhi_xprt_config;
if (pdev && pdev->dev.of_node) {
diff --git a/drivers/soc/qcom/ipc_router_smd_xprt.c b/drivers/soc/qcom/ipc_router_smd_xprt.c
index a94e81556027..6e17f0b9cc31 100644
--- a/drivers/soc/qcom/ipc_router_smd_xprt.c
+++ b/drivers/soc/qcom/ipc_router_smd_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -294,8 +294,10 @@ static void smd_xprt_read_data(struct work_struct *work)
spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
if (smd_xprtp->ss_reset) {
spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
- if (smd_xprtp->in_pkt)
+ if (smd_xprtp->in_pkt) {
release_pkt(smd_xprtp->in_pkt);
+ smd_xprtp->in_pkt = NULL;
+ }
smd_xprtp->is_partial_in_pkt = 0;
IPC_RTR_ERR("%s: %s channel reset\n",
__func__, smd_xprtp->xprt.name);
@@ -348,6 +350,7 @@ static void smd_xprt_read_data(struct work_struct *work)
__func__, smd_xprtp->xprt.name);
kfree_skb(ipc_rtr_pkt);
release_pkt(smd_xprtp->in_pkt);
+ smd_xprtp->in_pkt = NULL;
smd_xprtp->is_partial_in_pkt = 0;
return;
}
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
index 1046af031838..1857d369bc94 100644
--- a/drivers/soc/qcom/msm_performance.c
+++ b/drivers/soc/qcom/msm_performance.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2379,6 +2379,7 @@ end:
static void __ref try_hotplug(struct cluster *data)
{
unsigned int i;
+ struct device *dev;
if (!clusters_inited)
return;
@@ -2405,7 +2406,8 @@ static void __ref try_hotplug(struct cluster *data)
pr_debug("msm_perf: Offlining CPU%d\n", i);
cpumask_set_cpu(i, data->offlined_cpus);
lock_device_hotplug();
- if (device_offline(get_cpu_device(i))) {
+ dev = get_cpu_device(i);
+ if (!dev || device_offline(dev)) {
cpumask_clear_cpu(i, data->offlined_cpus);
pr_debug("msm_perf: Offlining CPU%d failed\n",
i);
@@ -2423,7 +2425,8 @@ static void __ref try_hotplug(struct cluster *data)
continue;
pr_debug("msm_perf: Onlining CPU%d\n", i);
lock_device_hotplug();
- if (device_online(get_cpu_device(i))) {
+ dev = get_cpu_device(i);
+ if (!dev || device_online(dev)) {
pr_debug("msm_perf: Onlining CPU%d failed\n",
i);
unlock_device_hotplug();
@@ -2442,11 +2445,19 @@ static void __ref try_hotplug(struct cluster *data)
static void __ref release_cluster_control(struct cpumask *off_cpus)
{
int cpu;
+ struct device *dev;
for_each_cpu(cpu, off_cpus) {
pr_debug("msm_perf: Release CPU %d\n", cpu);
lock_device_hotplug();
- if (!device_online(get_cpu_device(cpu)))
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ pr_debug("msm_perf: Failed to get CPU%d\n",
+ cpu);
+ unlock_device_hotplug();
+ continue;
+ }
+ if (!device_online(dev))
cpumask_clear_cpu(cpu, off_cpus);
unlock_device_hotplug();
}
diff --git a/drivers/soc/qcom/pasr.c b/drivers/soc/qcom/pasr.c
new file mode 100644
index 000000000000..da85dd50529e
--- /dev/null
+++ b/drivers/soc/qcom/pasr.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <soc/qcom/rpm-smd.h>
+
+struct memory_refresh_request {
+ u64 start; /* Lower bit signifies action
+ * 0 - disable self-refresh
+ * 1 - enable self-refresh
+ * upper bits are for base address
+ */
+ size_t size; /* size of memory region */
+};
+#define RPM_DDR_REQ 0x726464
+
+static void mem_region_refresh_control(unsigned long pfn,
+ unsigned long nr_pages, bool enable)
+{
+ struct memory_refresh_request mem_req;
+ struct msm_rpm_kvp rpm_kvp;
+ int ret;
+
+ mem_req.start = enable;
+ mem_req.start |= pfn << PAGE_SHIFT;
+ mem_req.size = nr_pages * PAGE_SIZE;
+
+ rpm_kvp.key = RPM_DDR_REQ;
+ rpm_kvp.data = (void *)&mem_req;
+ rpm_kvp.length = sizeof(mem_req);
+
+ ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET, RPM_DDR_REQ, 0,
+ &rpm_kvp, 1);
+ if (ret)
+ pr_err("PASR: Failed to send rpm message\n");
+}
+
+static int pasr_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mn = arg;
+ unsigned long start, end;
+
+ start = SECTION_ALIGN_DOWN(mn->start_pfn);
+ end = SECTION_ALIGN_UP(mn->start_pfn + mn->nr_pages);
+
+ if ((start != mn->start_pfn) || (end != mn->start_pfn + mn->nr_pages)) {
+ pr_err("PASR: %s pfn not aligned to section\n", __func__);
+ pr_err("PASR: start pfn = %lu end pfn = %lu\n",
+ mn->start_pfn, mn->start_pfn + mn->nr_pages);
+ return -EINVAL;
+ }
+
+ switch (action) {
+ case MEM_GOING_ONLINE:
+ pr_debug("PASR: MEM_GOING_ONLINE : start = %lx end = %lx",
+ mn->start_pfn << PAGE_SHIFT,
+ (mn->start_pfn + mn->nr_pages) << PAGE_SHIFT);
+ mem_region_refresh_control(mn->start_pfn, mn->nr_pages, true);
+ break;
+ case MEM_OFFLINE:
+ pr_debug("PASR: MEM_OFFLINE: start = %lx end = %lx",
+ mn->start_pfn << PAGE_SHIFT,
+ (mn->start_pfn + mn->nr_pages) << PAGE_SHIFT);
+ mem_region_refresh_control(mn->start_pfn, mn->nr_pages, false);
+ break;
+ case MEM_CANCEL_ONLINE:
+ pr_debug("PASR: MEM_CANCEL_ONLINE: start = %lx end = %lx",
+ mn->start_pfn << PAGE_SHIFT,
+ (mn->start_pfn + mn->nr_pages) << PAGE_SHIFT);
+ mem_region_refresh_control(mn->start_pfn, mn->nr_pages, false);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int __init pasr_module_init(void)
+{
+ return hotplug_memory_notifier(pasr_callback, 0);
+}
+late_initcall(pasr_module_init);
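As the struct comment above spells out, the low bit of start carries the self-refresh action while the upper, page-aligned bits carry the base address. A minimal decode sketch for the receiving side, under that layout (hypothetical helper, not part of the patch):

    /* illustrative: split a memory_refresh_request back into its parts;
     * safe because the base is PAGE_SHIFT-aligned, so bit 0 is free */
    static void decode_mem_refresh(const struct memory_refresh_request *req,
                                   u64 *base, bool *enable)
    {
        *enable = req->start & 1;
        *base = req->start & ~1ULL;
    }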
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index 90feb8b659d1..250cc88ba32d 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
obj-$(CONFIG_MSM_QDSP6_NOTIFIER) += audio_notifier.o
obj-$(CONFIG_MSM_CDSP_LOADER) += cdsp-loader.o
+obj-$(CONFIG_EXT_ANC) += sdsp-anc.o audio_anc.o audio-anc-dev-mgr.o
+obj-$(CONFIG_MSM_LPASS_RESOURCE_MANAGER) += lpass_resource_mgr.o
\ No newline at end of file
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index b1afd02b49bf..8cd86915be98 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010-2014, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, 2016, 2018 The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,7 +45,8 @@ static void *apr_pkt_ctx;
static wait_queue_head_t dsp_wait;
static wait_queue_head_t modem_wait;
static bool is_modem_up;
-static bool is_initial_boot;
+static bool is_initial_modem_boot;
+static bool is_initial_adsp_boot;
/* Subsystem restart: QDSP6 data, functions */
static struct workqueue_struct *apr_reset_workqueue;
static void apr_reset_deregister(struct work_struct *work);
@@ -209,6 +211,16 @@ static struct apr_svc_table svc_tbl_voice[] = {
},
};
+static const struct apr_svc_table svc_tbl_sdsp[] = {
+ {
+ /* Micro Audio Service */
+ .name = "MAS",
+ .idx = 0,
+ .id = APR_SVC_MAS,
+ .client_id = APR_CLIENT_AUDIO,
+ },
+};
+
enum apr_subsys_state apr_get_modem_state(void)
{
return atomic_read(&q6.modem_state);
@@ -444,6 +456,9 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
*/
can_open_channel = false;
domain_id = APR_DOMAIN_MODEM;
+ } else if (!strcmp(dest, "SDSP")) {
+ domain_id = APR_DOMAIN_SDSP;
+ pr_debug("APR: SDSP DOMAIN_ID %d\n", domain_id);
} else {
pr_err("APR: wrong destination\n");
goto done;
@@ -472,6 +487,8 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
}
}
pr_debug("%s: modem Up\n", __func__);
+ } else if (dest_id == APR_DEST_DSPS) {
+ pr_debug("%s: Sensor DSP Up\n", __func__);
}
if (apr_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id)) {
@@ -624,6 +641,8 @@ void apr_cb_func(void *buf, int len, void *priv)
pr_err("APR: Wrong svc :%d\n", svc);
return;
}
+ } else if (hdr->src_domain == APR_DOMAIN_SDSP) {
+ clnt = APR_CLIENT_AUDIO;
} else {
pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
return;
@@ -700,6 +719,9 @@ int apr_get_svc(const char *svc_name, int domain_id, int *client_id,
if ((domain_id == APR_DOMAIN_ADSP)) {
tbl = (struct apr_svc_table *)&svc_tbl_qdsp6;
size = ARRAY_SIZE(svc_tbl_qdsp6);
+ } else if (domain_id == APR_DOMAIN_SDSP) {
+ tbl = (struct apr_svc_table *)&svc_tbl_sdsp;
+ size = ARRAY_SIZE(svc_tbl_sdsp);
} else {
tbl = (struct apr_svc_table *)&svc_tbl_voice;
size = ARRAY_SIZE(svc_tbl_voice);
@@ -888,21 +910,28 @@ static int apr_notifier_service_cb(struct notifier_block *this,
* recovery notifications during initial boot
* up since everything is expected to be down.
*/
- if (is_initial_boot) {
- is_initial_boot = false;
- break;
- }
- if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN) {
+ if (is_initial_modem_boot) {
+ is_initial_modem_boot = false;
+ break;
+ }
apr_modem_down(opcode);
- else
+ } else {
+ if (is_initial_adsp_boot) {
+ is_initial_adsp_boot = false;
+ break;
+ }
apr_adsp_down(opcode);
+ }
break;
case AUDIO_NOTIFIER_SERVICE_UP:
- is_initial_boot = false;
- if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN) {
+ is_initial_modem_boot = false;
apr_modem_up();
- else
+ } else {
+ is_initial_adsp_boot = false;
apr_adsp_up();
+ }
break;
default:
break;
@@ -944,7 +973,8 @@ static int __init apr_init(void)
if (!apr_pkt_ctx)
pr_err("%s: Unable to create ipc log context\n", __func__);
- is_initial_boot = true;
+ is_initial_modem_boot = true;
+ is_initial_adsp_boot = true;
subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN,
&adsp_service_nb);
subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal.c b/drivers/soc/qcom/qdsp6v2/apr_tal.c
index 6cffe7be655a..3884667cc12c 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, 2013-2014, 2016 The Linux Foundation.
+/* Copyright (c) 2010-2011, 2013-2014, 2016, 2018 The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -38,6 +38,14 @@ static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
"apr_audio_svc",
"apr_voice_svc",
},
+ {
+ "",
+ "",
+ },
+ {
+ "apr_apps_sdsp",
+ "apr_apps_sdsp",
+ },
};
struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
@@ -162,7 +170,8 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest,
if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) ||
(dl >= APR_DL_MAX)) {
- pr_err("apr_tal: Invalid params\n");
+ pr_err("apr_tal: Invalid params clnt %d dest %d dl %d\n",
+ clnt, dest, dl);
return NULL;
}
@@ -184,10 +193,12 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest,
pr_debug("apr_tal:Wakeup done\n");
apr_svc_ch[dl][dest][clnt].dest_state = 0;
}
+
rc = smd_named_open_on_edge(svc_names[dest][clnt], dest,
- &apr_svc_ch[dl][dest][clnt].ch,
- &apr_svc_ch[dl][dest][clnt],
- apr_tal_notify);
+ &apr_svc_ch[dl][dest][clnt].ch,
+ &apr_svc_ch[dl][dest][clnt],
+ apr_tal_notify);
+
if (rc < 0) {
pr_err("apr_tal: smd_open failed %s\n",
svc_names[dest][clnt]);
@@ -256,6 +267,12 @@ static int apr_smd_probe(struct platform_device *pdev)
clnt = APR_CLIENT_AUDIO;
apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
+ } else if (pdev->id == APR_DEST_DSPS) {
+ pr_info("apr_tal:Sensor DSP Is Up\n");
+ dest = APR_DEST_DSPS;
+ clnt = APR_CLIENT_AUDIO;
+ apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
+ wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
} else
pr_err("apr_tal:Invalid Dest Id: %d\n", pdev->id);
@@ -278,6 +295,14 @@ static struct platform_driver apr_modem_driver = {
},
};
+static struct platform_driver apr_sdsp_driver = {
+ .probe = apr_smd_probe,
+ .driver = {
+ .name = "apr_apps_sdsp",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init apr_tal_init(void)
{
int i, j, k;
@@ -293,6 +318,7 @@ static int __init apr_tal_init(void)
}
platform_driver_register(&apr_q6_driver);
platform_driver_register(&apr_modem_driver);
+ platform_driver_register(&apr_sdsp_driver);
return 0;
}
device_initcall(apr_tal_init);
diff --git a/drivers/soc/qcom/qdsp6v2/apr_v2.c b/drivers/soc/qcom/qdsp6v2/apr_v2.c
index 037fb3327ef0..d42f2ff5912e 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_v2.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_v2.c
@@ -37,6 +37,8 @@ uint16_t apr_get_data_src(struct apr_hdr *hdr)
return APR_DEST_MODEM;
else if (hdr->src_domain == APR_DOMAIN_ADSP)
return APR_DEST_QDSP6;
+ else if (hdr->src_domain == APR_DOMAIN_SDSP)
+ return APR_DEST_DSPS;
else {
pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
return APR_DEST_MAX; /*RETURN INVALID VALUE*/
@@ -47,6 +49,8 @@ int apr_get_dest_id(char *dest)
{
if (!strcmp(dest, "ADSP"))
return APR_DEST_QDSP6;
+ else if (!strcmp(dest, "SDSP"))
+ return APR_DEST_DSPS;
else
return APR_DEST_MODEM;
}
diff --git a/drivers/soc/qcom/qdsp6v2/apr_vm.c b/drivers/soc/qcom/qdsp6v2/apr_vm.c
index 56592ac91e1b..bd555b6e6f3b 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_vm.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_vm.c
@@ -529,25 +529,23 @@ static int apr_vm_cb_thread(void *data)
{
uint32_t apr_rx_buf_len;
struct aprv2_vm_ack_rx_pkt_available_t apr_ack;
+ unsigned long delay;
int status = 0;
int ret = 0;
while (1) {
- apr_rx_buf_len = sizeof(apr_rx_buf);
- ret = habmm_socket_recv(hab_handle_rx,
- (void *)&apr_rx_buf,
- &apr_rx_buf_len,
- 0xFFFFFFFF,
- 0);
+ /* retry -EAGAIN for up to half a second per receive */
+ delay = jiffies + (HZ / 2);
+ do {
+ apr_rx_buf_len = sizeof(apr_rx_buf);
+ ret = habmm_socket_recv(hab_handle_rx,
+ (void *)&apr_rx_buf,
+ &apr_rx_buf_len,
+ 0xFFFFFFFF,
+ 0);
+ } while (time_before(jiffies, delay) && (ret == -EAGAIN) &&
+ (apr_rx_buf_len == 0));
if (ret) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, ret);
- /*
- * TODO: depends on the HAB error code,
- * may need to implement
- * a retry mechanism.
- * break if recv failed ?
- */
break;
}
diff --git a/drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c b/drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c
new file mode 100644
index 000000000000..75b114e6905c
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/audio-anc-dev-mgr.c
@@ -0,0 +1,1170 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/clk/msm-clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/msm-dai-q6-v2.h>
+#include <linux/qdsp6v2/audio-anc-dev-mgr.h>
+#include <linux/qdsp6v2/sdsp_anc.h>
+
+#define LPM_START_ADDR (0x9120000 + 60*1024)
+#define LPM_LENGTH (4*1024)
+
+enum {
+ ANC_DEV_PORT_REFS = 0,
+ ANC_DEV_PORT_ANC_SPKR,
+ ANC_DEV_PORT_ANC_MIC,
+ ANC_DEV_PORT_MAX,
+};
+
+struct anc_tdm_port_cfg_info {
+ u16 port_id;
+ struct afe_param_id_tdm_cfg port_cfg;
+};
+
+struct anc_tdm_group_set_info {
+ struct afe_param_id_group_device_tdm_cfg gp_cfg;
+ uint32_t num_tdm_group_ports;
+ struct afe_clk_set tdm_clk_set;
+ uint32_t clk_mode;
+};
+
+struct anc_dev_drv_info {
+ uint32_t state;
+ uint32_t rpm;
+ uint32_t bypass_mode;
+ uint32_t algo_module_id;
+};
+
+struct anc_dev_port_cfg_info {
+ uint32_t port_id;
+ uint32_t sample_rate;
+ uint32_t num_channels;
+ uint32_t bit_width;
+};
+
+static struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info
+ anc_mic_spkr_layout;
+
+static struct anc_dev_port_cfg_info anc_port_cfg[ANC_DEV_PORT_MAX];
+
+static struct anc_tdm_group_set_info anc_dev_tdm_gp_set[IDX_GROUP_TDM_MAX];
+
+static struct anc_tdm_port_cfg_info anc_dev_tdm_port_cfg[IDX_TDM_MAX];
+
+static struct anc_dev_drv_info this_anc_dev_info;
+
+static int anc_dev_get_free_tdm_gp_cfg_idx(void)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_GROUP_TDM_MAX; i++) {
+ if (anc_dev_tdm_gp_set[i].gp_cfg.group_id == 0) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static int anc_dev_get_free_tdm_port_cfg_idx(void)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_TDM_MAX; i++) {
+ if (anc_dev_tdm_port_cfg[i].port_id == 0) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static u16 get_group_id_from_port_id(int32_t port_id)
+{
+ u16 gp_id = AFE_PORT_INVALID;
+
+ switch (port_id) {
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+ case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_4:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_5:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_6:
+ case AFE_PORT_ID_SECONDARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX;
+ break;
+ case AFE_PORT_ID_TERTIARY_TDM_RX:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_1:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_2:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_3:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_4:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_5:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_6:
+ case AFE_PORT_ID_TERTIARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX;
+ break;
+ case AFE_PORT_ID_QUATERNARY_TDM_RX:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
+ case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
+ gp_id = AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX;
+ break;
+ default:
+ break;
+ }
+
+ return gp_id;
+}
+
+static int anc_dev_get_matched_tdm_gp_cfg_idx(u16 gp_id)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_GROUP_TDM_MAX; i++) {
+ if (anc_dev_tdm_gp_set[i].gp_cfg.group_id == gp_id) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static int anc_dev_get_matched_tdm_port_cfg_idx(u16 port_id)
+{
+ int idx = -1;
+ int i;
+
+ for (i = 0; i < IDX_TDM_MAX; i++) {
+ if (anc_dev_tdm_port_cfg[i].port_id == port_id) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static int anc_dev_tdm_set_clk(
+ struct anc_tdm_group_set_info *gp_set_data,
+ u16 port_id, bool enable)
+{
+ int rc = 0;
+
+ switch (gp_set_data->gp_cfg.group_id) {
+ case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT;
+ break;
+ case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT;
+ break;
+ case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT;
+ break;
+ case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
+ case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
+ if (gp_set_data->clk_mode) {
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT;
+ } else
+ gp_set_data->tdm_clk_set.clk_id =
+ Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT;
+ break;
+ default:
+ pr_err("%s: port id 0x%x not supported\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+ gp_set_data->tdm_clk_set.enable = enable;
+
+ rc = afe_set_lpass_clock_v2(port_id,
+ &gp_set_data->tdm_clk_set);
+
+ if (rc < 0)
+ pr_err("%s: afe lpass clock failed, err:%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static int anc_dev_port_start(int32_t which_port)
+{
+ int rc = 0;
+ int pt_idx;
+
+ struct afe_tdm_port_config tdm_cfg;
+
+ pt_idx =
+ anc_dev_get_matched_tdm_port_cfg_idx(anc_port_cfg[which_port].port_id);
+
+ if (pt_idx == -1) {
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ tdm_cfg.tdm = anc_dev_tdm_port_cfg[pt_idx].port_cfg;
+
+ tdm_cfg.tdm.num_channels = anc_port_cfg[which_port].num_channels;
+ tdm_cfg.tdm.sample_rate = anc_port_cfg[which_port].sample_rate;
+ tdm_cfg.tdm.bit_width = anc_port_cfg[which_port].bit_width;
+
+ tdm_cfg.tdm.nslots_per_frame = anc_port_cfg[which_port].num_channels;
+ tdm_cfg.tdm.slot_width = anc_port_cfg[which_port].bit_width;
+ tdm_cfg.tdm.slot_mask =
+ ((1 << anc_port_cfg[which_port].num_channels) - 1);
+
+ pr_debug("%s: port_id %x num_channels %x bit_width %x sample_rate %x nslots_per_frame %x slot_width %x slot_mask %x!\n",
+ __func__,
+ anc_port_cfg[which_port].port_id,
+ tdm_cfg.tdm.num_channels,
+ tdm_cfg.tdm.bit_width,
+ tdm_cfg.tdm.sample_rate,
+ tdm_cfg.tdm.nslots_per_frame,
+ tdm_cfg.tdm.slot_width,
+ tdm_cfg.tdm.slot_mask);
+
+ rc = anc_if_tdm_port_start(anc_port_cfg[which_port].port_id,
+ &tdm_cfg);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to open ANC port from SDSP 0x%x\n",
+ __func__, anc_port_cfg[which_port].port_id);
+ goto rtn;
+ }
+
+rtn:
+ return rc;
+}
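Note the slot bookkeeping above: nslots_per_frame mirrors the channel count and slot_mask is a contiguous mask over those slots, so 4 channels yields (1 << 4) - 1 = 0xF. A one-line sketch of the same computation:

    /* illustrative: contiguous TDM slot mask for n active channels */
    static inline u16 tdm_slot_mask(u32 num_channels)
    {
        return (u16)((1U << num_channels) - 1); /* e.g. 4 -> 0xF */
    }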
+
+static int anc_dev_port_stop(int32_t which_port)
+{
+ int rc = 0;
+
+ rc = anc_if_tdm_port_stop(anc_port_cfg[which_port].port_id);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to stop ANC port from SDSP 0x%x\n",
+ __func__, anc_port_cfg[which_port].port_id);
+ }
+
+ return rc;
+}
+
+int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd)
+{
+ int rc = 0;
+
+ switch (anc_cmd) {
+ case ANC_CMD_RPM: {
+ struct audio_anc_rpm_info *rpm_info_p =
+ (struct audio_anc_rpm_info *)info_p;
+
+ if (this_anc_dev_info.state)
+ rc = anc_if_set_rpm(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ rpm_info_p->rpm);
+ else
+ this_anc_dev_info.rpm = 0;
+ break;
+ }
+ case ANC_CMD_BYPASS_MODE: {
+ struct audio_anc_bypass_mode *bypass_mode_p =
+ (struct audio_anc_bypass_mode *)info_p;
+
+ if (this_anc_dev_info.state)
+ rc = anc_if_set_bypass_mode(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ bypass_mode_p->mode);
+ else
+ this_anc_dev_info.bypass_mode = bypass_mode_p->mode;
+ break;
+ }
+ case ANC_CMD_ALGO_MODULE: {
+ struct audio_anc_algo_module_info *module_info_p =
+ (struct audio_anc_algo_module_info *)info_p;
+
+ if (this_anc_dev_info.state)
+ rc = anc_if_set_algo_module_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ module_info_p->module_id);
+ else
+ this_anc_dev_info.algo_module_id =
+ module_info_p->module_id;
+ break;
+ }
+ }
+
+ return rc;
+}
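For context, a hedged sketch of how a caller might drive the RPM command through this dispatcher; only ANC_CMD_RPM, the rpm field of struct audio_anc_rpm_info, and the msm_anc_dev_set_info() signature come from this file, the calling context is an assumption:

    /* hypothetical caller, not part of the patch */
    static int example_set_anc_rpm(uint32_t rpm)
    {
        struct audio_anc_rpm_info rpm_info;

        rpm_info.rpm = rpm;
        /* forwarded to the SDSP only while the ANC device is started;
         * otherwise the cached rpm is reset to 0, per the code above */
        return msm_anc_dev_set_info(&rpm_info, ANC_CMD_RPM);
    }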
+
+
+int msm_anc_dev_start(void)
+{
+ int rc = 0;
+ u16 group_id;
+ int gp_idx, pt_idx;
+ union afe_port_group_config anc_dev_gp_cfg;
+ struct afe_tdm_port_config tdm_cfg;
+
+ pr_debug("%s: ANC devices start in!\n", __func__);
+
+ memset(&tdm_cfg, 0, sizeof(tdm_cfg));
+
+ /*
+ * Refs port for ADSP
+ * 1. enable clk
+ * 2. group cfg and enable
+ * 3. Refs port cfg and start
+ */
+
+ group_id =
+ get_group_id_from_port_id(anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ pr_err("%s: anc_dev_get_matched_tdm_gp_cfg_idx() failed with group_id 0x%x\n",
+ __func__, group_id);
+ goto rtn;
+ } else {
+ rc = anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_REFS].port_id, true);
+
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable AFE clk 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ anc_dev_gp_cfg.tdm_cfg = anc_dev_tdm_gp_set[gp_idx].gp_cfg;
+
+ anc_dev_gp_cfg.tdm_cfg.group_device_cfg_minor_version =
+ AFE_API_VERSION_GROUP_DEVICE_TDM_CONFIG;
+ anc_dev_gp_cfg.tdm_cfg.num_channels =
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels;
+ anc_dev_gp_cfg.tdm_cfg.bit_width =
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width;
+ anc_dev_gp_cfg.tdm_cfg.sample_rate =
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate;
+ anc_dev_gp_cfg.tdm_cfg.nslots_per_frame =
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels;
+ anc_dev_gp_cfg.tdm_cfg.slot_width =
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width;
+ anc_dev_gp_cfg.tdm_cfg.slot_mask =
+ ((1 << anc_port_cfg[ANC_DEV_PORT_REFS].num_channels) - 1);
+
+ pr_debug("%s: refs_port_id %x\n", __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ pr_debug("%s: anc_dev_gp_cfg num_channels %x bit_width %x sample_rate %x nslots_per_frame %x slot_width %x slot_mask %x!\n",
+ __func__,
+ anc_dev_gp_cfg.tdm_cfg.num_channels,
+ anc_dev_gp_cfg.tdm_cfg.bit_width,
+ anc_dev_gp_cfg.tdm_cfg.sample_rate,
+ anc_dev_gp_cfg.tdm_cfg.nslots_per_frame,
+ anc_dev_gp_cfg.tdm_cfg.slot_width,
+ anc_dev_gp_cfg.tdm_cfg.slot_mask);
+
+ rc = afe_port_group_enable(group_id,
+ &anc_dev_gp_cfg, true);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable AFE group 0x%x\n",
+ __func__, group_id);
+ goto rtn;
+ }
+
+ pt_idx =
+ anc_dev_get_matched_tdm_port_cfg_idx(
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ if (pt_idx == -1) {
+ rc = -EINVAL;
+ pr_err("%s: anc_dev_get_matched_tdm_port_cfg_idx() failed with port_id 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ tdm_cfg.tdm = anc_dev_tdm_port_cfg[pt_idx].port_cfg;
+
+ tdm_cfg.tdm.num_channels =
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels;
+ tdm_cfg.tdm.sample_rate =
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate;
+ tdm_cfg.tdm.bit_width =
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width;
+
+ tdm_cfg.tdm.nslots_per_frame =
+ anc_dev_gp_cfg.tdm_cfg.nslots_per_frame;
+ tdm_cfg.tdm.slot_width = anc_dev_gp_cfg.tdm_cfg.slot_width;
+ tdm_cfg.tdm.slot_mask = anc_dev_gp_cfg.tdm_cfg.slot_mask;
+
+ rc = afe_tdm_port_start(anc_port_cfg[ANC_DEV_PORT_REFS].port_id,
+ &tdm_cfg,
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate, 0);
+ if (IS_ERR_VALUE(rc)) {
+ afe_port_group_enable(group_id,
+ &anc_dev_gp_cfg, false);
+
+ anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_REFS].port_id, false);
+
+ pr_err("%s: fail to open AFE port 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ }
+
+ rc = anc_if_set_anc_mic_spkr_layout(
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id,
+ &anc_mic_spkr_layout);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to pass ANC MIC and SPKR layout info to SDSP 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ rc = anc_if_share_resource(
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id, 4, 3,
+ LPM_START_ADDR, LPM_LENGTH);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to assign lpass resource to SDSP 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ rc = anc_if_config_ref(anc_port_cfg[ANC_DEV_PORT_REFS].port_id,
+ anc_port_cfg[ANC_DEV_PORT_REFS].sample_rate,
+ anc_port_cfg[ANC_DEV_PORT_REFS].bit_width,
+ anc_port_cfg[ANC_DEV_PORT_REFS].num_channels);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to refs port cfg in SDSP 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+ goto rtn;
+ }
+
+ if (this_anc_dev_info.algo_module_id != 0)
+ rc = anc_if_set_algo_module_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ this_anc_dev_info.algo_module_id);
+
+ if (this_anc_dev_info.bypass_mode != 0)
+ rc = anc_if_set_bypass_mode(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
+ this_anc_dev_info.bypass_mode);
+
+ group_id = get_group_id_from_port_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ pr_err("%s: anc_dev_get_matched_tdm_gp_cfg_idx() failed with group_id 0x%x\n",
+ __func__, group_id);
+ goto rtn;
+ } else {
+ rc = anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id, true);
+
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable AFE clk 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+ goto rtn;
+ }
+ }
+
+ rc = anc_dev_port_start(ANC_DEV_PORT_ANC_MIC);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable ANC MIC Port 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_MIC].port_id);
+ goto rtn;
+ }
+
+ rc = anc_dev_port_start(ANC_DEV_PORT_ANC_SPKR);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to enable ANC SPKR Port 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+ goto rtn;
+ }
+
+ this_anc_dev_info.state = 1;
+
+ pr_debug("%s: ANC devices start successfully!\n", __func__);
+
+rtn:
+ return rc;
+}
+
+int msm_anc_dev_stop(void)
+{
+ int rc = 0;
+ u16 group_id;
+ int gp_idx;
+
+ anc_dev_port_stop(ANC_DEV_PORT_ANC_SPKR);
+ anc_dev_port_stop(ANC_DEV_PORT_ANC_MIC);
+
+ group_id = get_group_id_from_port_id(
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ goto rtn;
+ } else {
+ rc = anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id, false);
+
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: fail to disable AFE clk 0x%x\n",
+ __func__,
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
+ }
+ }
+
+ group_id =
+ get_group_id_from_port_id(anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ gp_idx = anc_dev_get_matched_tdm_gp_cfg_idx(group_id);
+
+ if (gp_idx == -1) {
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ afe_close(anc_port_cfg[ANC_DEV_PORT_REFS].port_id);
+
+ afe_port_group_enable(group_id, NULL, false);
+
+ anc_dev_tdm_set_clk(&anc_dev_tdm_gp_set[gp_idx],
+ (u16)anc_port_cfg[ANC_DEV_PORT_REFS].port_id, false);
+
+ this_anc_dev_info.state = 0;
+ this_anc_dev_info.algo_module_id = 0;
+ this_anc_dev_info.rpm = 0;
+ this_anc_dev_info.bypass_mode = 0;
+
+ pr_debug("%s: ANC devices stop successfully!\n", __func__);
+
+rtn:
+ return rc;
+}
+
+
+static int msm_anc_tdm_dev_group_cfg_info(
+ struct platform_device *pdev,
+ struct device_node *ctx_node)
+{
+ int rc = 0;
+ const uint32_t *port_id_array = NULL;
+ uint32_t num_tdm_group_ports = 0;
+ uint32_t array_length = 0;
+ int i = 0;
+ int gp_idx = anc_dev_get_free_tdm_gp_cfg_idx();
+
+ if ((gp_idx < 0) || (gp_idx > IDX_GROUP_TDM_MAX)) {
+ dev_err(&pdev->dev, "%s: could not get abaiable tdm group cfg slot\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ /* extract tdm group info into static */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-group-id",
+ (u32 *)&anc_dev_tdm_gp_set[gp_idx].gp_cfg.group_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Group ID from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-group-id");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: dev_name: %s group_id: 0x%x\n",
+ __func__, dev_name(&pdev->dev),
+ anc_dev_tdm_gp_set[gp_idx].gp_cfg.group_id);
+
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-group-num-ports",
+ &num_tdm_group_ports);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Group Num Ports from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-group-num-ports");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Group Num Ports from DT file 0x%x\n",
+ __func__, num_tdm_group_ports);
+
+ if (num_tdm_group_ports > AFE_GROUP_DEVICE_NUM_PORTS) {
+ dev_err(&pdev->dev, "%s Group Num Ports %d greater than Max %d\n",
+ __func__, num_tdm_group_ports,
+ AFE_GROUP_DEVICE_NUM_PORTS);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ port_id_array = of_get_property(ctx_node,
+ "qcom,msm-cpudai-tdm-group-port-id",
+ &array_length);
+ if (port_id_array == NULL) {
+ dev_err(&pdev->dev, "%s port_id_array is not valid\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+ if (array_length != sizeof(uint32_t) * num_tdm_group_ports) {
+ dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
+ __func__, array_length,
+ sizeof(uint32_t) * num_tdm_group_ports);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ for (i = 0; i < num_tdm_group_ports; i++)
+ anc_dev_tdm_gp_set[gp_idx].gp_cfg.port_id[i] =
+ (u16)be32_to_cpu(port_id_array[i]);
+ /* Unused index should be filled with 0 or AFE_PORT_INVALID */
+ for (i = num_tdm_group_ports;
+ i < AFE_GROUP_DEVICE_NUM_PORTS; i++)
+ anc_dev_tdm_gp_set[gp_idx].gp_cfg.port_id[i] = AFE_PORT_INVALID;
+
+ anc_dev_tdm_gp_set[gp_idx].num_tdm_group_ports = num_tdm_group_ports;
+
+ /* extract tdm clk info into static */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-rate",
+ &anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_freq_in_hz);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Clk Rate from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-clk-rate");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Clk Rate from DT file %d\n",
+ __func__,
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_freq_in_hz);
+
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_set_minor_version =
+ Q6AFE_LPASS_CLK_CONFIG_API_VERSION;
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_attri =
+ Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO;
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_root =
+ Q6AFE_LPASS_CLK_ROOT_DEFAULT;
+
+
+ /* extract tdm clk attribute into static */
+ if (of_find_property(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-attribute", NULL)) {
+ rc = of_property_read_u16(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-attribute",
+ &anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_attri);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: No Clk attribute in DT file %s\n",
+ __func__,
+ "qcom,msm-cpudai-tdm-clk-attribute");
+ goto rtn;
+ }
+ } else {
+ dev_dbg(&pdev->dev, "%s: Clk Attribute from DT file %d\n",
+ __func__,
+ anc_dev_tdm_gp_set[gp_idx].tdm_clk_set.clk_attri);
+ }
+
+ /* extract tdm clk src master/slave info into static */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-clk-internal",
+ &anc_dev_tdm_gp_set[gp_idx].clk_mode);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Clk id from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-clk-internal");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Clk id from DT file %d\n",
+ __func__, anc_dev_tdm_gp_set[gp_idx].clk_mode);
+
+rtn:
+ return rc;
+}
+
+
+static int msm_anc_tdm_dev_port_cfg_info(
+ struct platform_device *pdev,
+ struct device_node *ctx_node)
+{
+ int rc = 0;
+ u32 tdm_dev_id = 0;
+ int pt_idx = anc_dev_get_free_tdm_port_cfg_idx();
+ struct device_node *tdm_parent_node = NULL;
+
+ if ((pt_idx < 0) || (pt_idx > IDX_TDM_MAX)) {
+ dev_err(&pdev->dev, "%s: could not get abaiable tdm port cfg slot\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ /* retrieve device/afe id */
+ rc = of_property_read_u32(ctx_node,
+ "qcom,msm-cpudai-tdm-dev-id",
+ &tdm_dev_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Device ID missing in DT file\n",
+ __func__);
+ goto rtn;
+ }
+ if ((tdm_dev_id < AFE_PORT_ID_TDM_PORT_RANGE_START) ||
+ (tdm_dev_id > AFE_PORT_ID_TDM_PORT_RANGE_END)) {
+ dev_err(&pdev->dev, "%s: Invalid TDM Device ID 0x%x in DT file\n",
+ __func__, tdm_dev_id);
+ rc = -ENXIO;
+ goto rtn;
+ }
+ anc_dev_tdm_port_cfg[pt_idx].port_id = tdm_dev_id;
+
+ dev_dbg(&pdev->dev, "%s: dev_name: %s dev_id: 0x%x\n",
+ __func__, dev_name(&pdev->dev), tdm_dev_id);
+
+ /* TDM CFG */
+ tdm_parent_node = of_get_parent(ctx_node);
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-sync-mode",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_mode);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Sync Mode from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-sync-mode");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Sync Mode from DT file 0x%x\n",
+ __func__, anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_mode);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-sync-src",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_src);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Sync Src from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-sync-src");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Sync Src from DT file 0x%x\n",
+ __func__, anc_dev_tdm_port_cfg[pt_idx].port_cfg.sync_src);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-data-out",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_data_out_enable);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Data Out from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-data-out");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Data Out from DT file 0x%x\n",
+ __func__,
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_data_out_enable);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-invert-sync",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_invert_sync_pulse);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Invert Sync from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-invert-sync");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Invert Sync from DT file 0x%x\n",
+ __func__,
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_invert_sync_pulse);
+
+ rc = of_property_read_u32(tdm_parent_node,
+ "qcom,msm-cpudai-tdm-data-delay",
+ (u32 *)&anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_sync_data_delay);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: Data Delay from DT file %s\n",
+ __func__, "qcom,msm-cpudai-tdm-data-delay");
+ goto rtn;
+ }
+ dev_dbg(&pdev->dev, "%s: Data Delay from DT file 0x%x\n",
+ __func__,
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.ctrl_sync_data_delay);
+
+ /* TDM CFG -- set default */
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.data_format = AFE_LINEAR_PCM_DATA;
+ anc_dev_tdm_port_cfg[pt_idx].port_cfg.tdm_cfg_minor_version =
+ AFE_API_VERSION_TDM_CONFIG;
+
+ msm_anc_tdm_dev_group_cfg_info(pdev, tdm_parent_node);
+
+ return 0;
+
+rtn:
+ return rc;
+}
+
+static int msm_anc_dev_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ u32 port_id = 0;
+ const uint32_t *layout_array = NULL;
+ uint32_t num_anc_io = 0;
+ uint32_t array_length = 0;
+ int i = 0;
+ uint32_t sample_rate = 0;
+ uint32_t num_channels = 0;
+ uint32_t bit_width = 0;
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,refs-port-id",
+ (u32 *)&port_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC refs-port-id DT file %s\n",
+ __func__, "qcom,refs-port-id");
+ goto rtn;
+ }
+
+ anc_port_cfg[ANC_DEV_PORT_REFS].port_id = port_id;
+
+ dev_dbg(&pdev->dev, "%s: refs-port-id 0x%x\n",
+ __func__, port_id);
+
+ port_id = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,spkr-port-id",
+ (u32 *)&port_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC spkr-port-id DT file %s\n",
+ __func__, "qcom,spkr-port-id");
+ goto rtn;
+ }
+
+ anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id = port_id;
+
+ dev_dbg(&pdev->dev, "%s: spkr-port-id 0x%x\n",
+ __func__, port_id);
+
+ port_id = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mic-port-id",
+ (u32 *)&port_id);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC mic-port-id DT file %s\n",
+ __func__, "qcom,mic-port-id");
+ goto rtn;
+ }
+
+ anc_port_cfg[ANC_DEV_PORT_ANC_MIC].port_id = port_id;
+
+ dev_dbg(&pdev->dev, "%s: mic-port-id 0x%x\n",
+ __func__, port_id);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,sample-rate",
+ (u32 *)&sample_rate);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC sample rate DT file %s\n",
+ __func__, "qcom,sample-rate");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: ANC sample rate 0x%x\n",
+ __func__, sample_rate);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-channels",
+ (u32 *)&num_channels);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num channels DT file %s\n",
+ __func__, "qcom,num-channels");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: ANC num channel 0x%x\n",
+ __func__, num_channels);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,bit-width",
+ (u32 *)&bit_width);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC bit width DT file %s\n",
+ __func__, "qcom,bit-width");
+ goto rtn;
+ }
+
+ dev_dbg(&pdev->dev, "%s: ANC bit width 0x%x\n",
+ __func__, bit_width);
+
+ for (i = 0; i < ANC_DEV_PORT_MAX; i++) {
+ anc_port_cfg[i].sample_rate = sample_rate;
+ anc_port_cfg[i].num_channels = num_channels;
+ anc_port_cfg[i].bit_width = bit_width;
+ }
+
+ memset(&anc_mic_spkr_layout, 0, sizeof(anc_mic_spkr_layout));
+
+ anc_mic_spkr_layout.minor_version = 1;
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-anc-mic",
+ (u32 *)&num_anc_io);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num_anc_mic DT file %s\n",
+ __func__, "qcom,num-anc-mic");
+ goto rtn;
+ }
+
+ layout_array = of_get_property(pdev->dev.of_node,
+ "qcom,anc-mic-array",
+ &array_length);
+ if (layout_array == NULL) {
+ dev_err(&pdev->dev, "%s layout_array is not valid\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+ if (array_length != sizeof(uint32_t) * num_anc_io) {
+ dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
+ __func__, array_length,
+ sizeof(uint32_t) * num_anc_io);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_anc_mic = num_anc_io;
+
+ for (i = 0; i < num_anc_io; i++)
+ anc_mic_spkr_layout.mic_layout_array[i] =
+ (u16)be32_to_cpu(layout_array[i]);
+
+ num_anc_io = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-anc-spkr",
+ (u32 *)&num_anc_io);
+ if (rc) {
+		dev_err(&pdev->dev, "%s: ANC num_anc_spkr DT file %s\n",
+ __func__, "qcom,num-anc-spkr");
+ goto rtn;
+ }
+
+ layout_array = of_get_property(pdev->dev.of_node,
+ "qcom,anc-spkr-array",
+ &array_length);
+ if (layout_array == NULL) {
+ dev_err(&pdev->dev, "%s layout_array is not valid\n",
+ __func__);
+ rc = -EINVAL;
+ goto rtn;
+ }
+ if (array_length != sizeof(uint32_t) * num_anc_io) {
+ dev_err(&pdev->dev, "%s array_length is %d, expected is %zd\n",
+ __func__, array_length,
+ sizeof(uint32_t) * num_anc_io);
+ rc = -EINVAL;
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_anc_spkr = num_anc_io;
+
+ for (i = 0; i < num_anc_io; i++)
+ anc_mic_spkr_layout.spkr_layout_array[i] =
+ (u16)be32_to_cpu(layout_array[i]);
+
+ dev_dbg(&pdev->dev, "%s: num_anc_mic 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_anc_mic);
+
+ dev_dbg(&pdev->dev, "%s: num_anc_spkr 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_anc_spkr);
+
+ num_anc_io = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-add-mic-signal",
+ (u32 *)&num_anc_io);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num_add_mic_signal DT file %s\n",
+ __func__, "qcom,num-add-mic-signal");
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_add_mic_signal = num_anc_io;
+
+ num_anc_io = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-add-spkr-signal",
+ (u32 *)&num_anc_io);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: ANC num_add_spkr_signal DT file %s\n",
+ __func__, "qcom,num-add-spkr-signal");
+ goto rtn;
+ }
+
+ anc_mic_spkr_layout.num_add_spkr_signal = num_anc_io;
+
+ dev_dbg(&pdev->dev, "%s: num_add_mic_signal 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_add_mic_signal);
+
+ dev_dbg(&pdev->dev, "%s: num_add_spkr_signal 0x%x\n",
+ __func__, anc_mic_spkr_layout.num_add_spkr_signal);
+
+ /* TDM group CFG and TDM port CFG */
+ {
+ struct device_node *ctx_node = NULL;
+
+ ctx_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,refs-tdm-rx", 0);
+ if (!ctx_node) {
+ pr_err("%s Could not find refs-tdm-rx info\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_anc_tdm_dev_port_cfg_info(pdev, ctx_node);
+ if (IS_ERR_VALUE(rc)) {
+			pr_err("%s: fail to probe refs TDM port config\n",
+				__func__);
+ }
+
+ ctx_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,spkr-tdm-rx", 0);
+ if (!ctx_node) {
+ pr_err("%s Could not find spkr-tdm-rx info\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_anc_tdm_dev_port_cfg_info(pdev, ctx_node);
+ if (IS_ERR_VALUE(rc)) {
+			pr_err("%s: fail to probe spkr TDM port config\n",
+				__func__);
+ }
+
+ ctx_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,mic-tdm-tx", 0);
+ if (!ctx_node) {
+ pr_err("%s Could not find mic-tdm-tx info\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_anc_tdm_dev_port_cfg_info(pdev, ctx_node);
+ if (IS_ERR_VALUE(rc)) {
+			pr_err("%s: fail to probe mic TDM port config\n",
+				__func__);
+ }
+ }
+
+ rc = msm_anc_dev_create(pdev);
+
+rtn:
+ return rc;
+}
+
+static int msm_anc_dev_remove(struct platform_device *pdev)
+{
+ return msm_anc_dev_destroy(pdev);
+}
+
+static const struct of_device_id msm_anc_dev_dt_match[] = {
+ { .compatible = "qcom,msm-ext-anc", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_anc_dev_dt_match);
+
+static struct platform_driver msm_anc_dev = {
+ .probe = msm_anc_dev_probe,
+ .remove = msm_anc_dev_remove,
+ .driver = {
+ .name = "msm-ext-anc",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_anc_dev_dt_match,
+ },
+};
+
+int msm_anc_dev_init(void)
+{
+ int rc = 0;
+
+ memset(&anc_dev_tdm_gp_set, 0, sizeof(anc_dev_tdm_gp_set));
+ memset(&anc_dev_tdm_port_cfg, 0, sizeof(anc_dev_tdm_port_cfg));
+ memset(&anc_port_cfg, 0, sizeof(anc_port_cfg));
+ memset(&this_anc_dev_info, 0, sizeof(this_anc_dev_info));
+
+ rc = platform_driver_register(&msm_anc_dev);
+ if (rc)
+ pr_err("%s: fail to register msm ANC device driver\n",
+ __func__);
+
+ return rc;
+}
+
+int msm_anc_dev_deinit(void)
+{
+ platform_driver_unregister(&msm_anc_dev);
+ return 0;
+}
+
diff --git a/drivers/soc/qcom/qdsp6v2/audio_anc.c b/drivers/soc/qcom/qdsp6v2/audio_anc.c
new file mode 100644
index 000000000000..65c585886453
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/audio_anc.c
@@ -0,0 +1,354 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+
+#include <linux/qdsp6v2/audio-anc-dev-mgr.h>
+
+#define DEVICE_NAME "msm_audio_anc"
+
+struct audio_anc_info {
+ struct cdev myc;
+ struct class *anc_class;
+};
+
+static int major;
+
+static struct audio_anc_info audio_anc;
+
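+/* Expected payload size for each ANC command, used to validate the
+ * command size supplied by user space.
+ */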
+static size_t get_user_anc_cmd_size(int32_t anc_cmd)
+{
+ size_t size = 0;
+
+ switch (anc_cmd) {
+ case ANC_CMD_START:
+ case ANC_CMD_STOP:
+ size = 0;
+ break;
+ case ANC_CMD_RPM:
+ size = sizeof(struct audio_anc_rpm_info);
+ break;
+ case ANC_CMD_BYPASS_MODE:
+ size = sizeof(struct audio_anc_bypass_mode);
+ break;
+ case ANC_CMD_ALGO_MODULE:
+ size = sizeof(struct audio_anc_algo_module_info);
+ break;
+ case ANC_CMD_ALGO_CALIBRATION:
+ size = sizeof(struct audio_anc_algo_calibration_info);
+ break;
+ default:
+ pr_err("%s:Invalid anc cmd %d!",
+ __func__, anc_cmd);
+ }
+ return size;
+}
+
+static int call_set_anc(int32_t anc_cmd,
+ size_t anc_cmd_size, void *data)
+{
+ int ret = 0;
+
+	pr_debug("%s EXT_ANC anc_cmd %x\n", __func__, anc_cmd);
+
+ switch (anc_cmd) {
+ case ANC_CMD_START:
+ ret = msm_anc_dev_start();
+ break;
+ case ANC_CMD_STOP:
+ ret = msm_anc_dev_stop();
+ break;
+ case ANC_CMD_RPM:
+ case ANC_CMD_BYPASS_MODE:
+ case ANC_CMD_ALGO_MODULE:
+ case ANC_CMD_ALGO_CALIBRATION:
+ ret = msm_anc_dev_set_info(data, anc_cmd);
+ break;
+ default:
+ break;
+ }
+
+	pr_debug("%s EXT_ANC ret %x\n", __func__, ret);
+
+ return ret;
+}
+
+static int call_get_anc(int32_t anc_cmd,
+ size_t anc_cmd_size, void *data)
+{
+ int ret = 0;
+
+ switch (anc_cmd) {
+ case ANC_CMD_RPM:
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int audio_anc_open(struct inode *inode, struct file *f)
+{
+ int ret = 0;
+
+ pr_debug("%s\n", __func__);
+ return ret;
+}
+
+static int audio_anc_close(struct inode *inode, struct file *f)
+{
+ int ret = 0;
+
+ pr_debug("%s\n", __func__);
+ return ret;
+}
+
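+/*
+ * Shared ioctl handler: read the packet size from user space, validate
+ * it, copy in the full audio_anc_packet, sanity-check the embedded
+ * command ID and payload size, and dispatch to the set/get handlers.
+ * For AUDIO_ANC_GET_PARAM the packet is copied back to user space.
+ */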
+static long audio_anc_shared_ioctl(struct file *file, unsigned int cmd,
+ void __user *arg)
+{
+ int ret = 0;
+ int32_t size;
+ struct audio_anc_packet *data = NULL;
+
+	pr_debug("%s EXT_ANC cmd %x\n", __func__, cmd);
+
+ switch (cmd) {
+ case AUDIO_ANC_SET_PARAM:
+ case AUDIO_ANC_GET_PARAM:
+ break;
+ default:
+ pr_err("%s: ioctl not found!\n", __func__);
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (copy_from_user(&size, (void *)arg, sizeof(size))) {
+ pr_err("%s: Could not copy size value from user\n", __func__);
+ ret = -EFAULT;
+ goto done;
+ } else if (size < sizeof(struct audio_anc_packet)) {
+ pr_err("%s: Invalid size sent to driver: %d, min size is %zd\n",
+ __func__, size, sizeof(struct audio_anc_packet));
+ ret = -EINVAL;
+ goto done;
+ }
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (data == NULL) {
+ ret = -ENOMEM;
+ pr_err("%s: Could not allocate memory of size %d for ioctl\n",
+ __func__, size);
+ goto done;
+ } else if (copy_from_user(data, (void *)arg, size)) {
+ pr_err("%s: Could not copy data from user\n",
+ __func__);
+ ret = -EFAULT;
+ goto done;
+ } else if ((data->hdr.anc_cmd < 0) ||
+ (data->hdr.anc_cmd >= ANC_CMD_MAX)) {
+ pr_err("%s: anc_cmd %d is Invalid!\n",
+ __func__, data->hdr.anc_cmd);
+ ret = -EINVAL;
+ goto done;
+ } else if ((data->hdr.anc_cmd_size <
+ get_user_anc_cmd_size(data->hdr.anc_cmd)) ||
+ (data->hdr.anc_cmd_size >
+ sizeof(union audio_anc_data))) {
+ pr_err("%s: anc_cmd size %d is Invalid! Min is %zd Max is %zd!\n",
+ __func__, data->hdr.anc_cmd_size,
+ get_user_anc_cmd_size(data->hdr.anc_cmd),
+ sizeof(union audio_anc_data));
+ ret = -EINVAL;
+ goto done;
+ } else if ((data->hdr.anc_cmd_size + sizeof(data->hdr)) > size) {
+		pr_err("%s: anc_cmd size %d + anc cmd hdr size %zd is greater than user buffer size %d!\n",
+ __func__, data->hdr.anc_cmd_size, sizeof(data->hdr),
+ size);
+ ret = -EFAULT;
+ goto done;
+ }
+
+ switch (cmd) {
+ case AUDIO_ANC_SET_PARAM:
+ ret = call_set_anc(data->hdr.anc_cmd,
+ data->hdr.anc_cmd_size, &data->anc_data);
+ break;
+ case AUDIO_ANC_GET_PARAM:
+ ret = call_get_anc(data->hdr.anc_cmd,
+ data->hdr.anc_cmd_size, &data->anc_data);
+ break;
+ }
+
+ if (cmd == AUDIO_ANC_GET_PARAM) {
+ if (data->hdr.anc_cmd_size == 0)
+ goto done;
+ if (data == NULL)
+ goto done;
+ if (copy_to_user(arg, data,
+ sizeof(data->hdr) + data->hdr.anc_cmd_size)) {
+ pr_err("%s: Could not copy anc data to user\n",
+ __func__);
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+
+done:
+ kfree(data);
+
+	pr_debug("%s EXT_ANC ret %x\n", __func__, ret);
+
+ return ret;
+}
+
+static long audio_anc_ioctl(struct file *f,
+ unsigned int cmd, unsigned long arg)
+{
+ return audio_anc_shared_ioctl(f, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+
+#define AUDIO_ANC_SET_PARAM32 _IOWR(ANC_IOCTL_MAGIC, \
+ 300, compat_uptr_t)
+#define AUDIO_ANC_GET_PARAM32 _IOWR(ANC_IOCTL_MAGIC, \
+ 301, compat_uptr_t)
+
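+/* 32-bit compat path: translate the compat ioctl numbers to their
+ * native equivalents and reuse the shared handler.
+ */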
+static long audio_anc_compat_ioctl(struct file *f,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int cmd64;
+ int ret = 0;
+
+ switch (cmd) {
+ case AUDIO_ANC_SET_PARAM32:
+ cmd64 = AUDIO_ANC_SET_PARAM;
+ break;
+ case AUDIO_ANC_GET_PARAM32:
+ cmd64 = AUDIO_ANC_GET_PARAM;
+ break;
+ default:
+ pr_err("%s: ioctl not found!\n", __func__);
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = audio_anc_shared_ioctl(f, cmd64, compat_ptr(arg));
+done:
+ return ret;
+}
+#endif
+
+static const struct file_operations audio_anc_fops = {
+ .owner = THIS_MODULE,
+ .open = audio_anc_open,
+ .release = audio_anc_close,
+ .unlocked_ioctl = audio_anc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = audio_anc_compat_ioctl,
+#endif
+};
+
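+/*
+ * Register the msm_audio_anc character device: chrdev region, device
+ * class, cdev and the /dev node. Called from the ANC platform probe.
+ */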
+int msm_anc_dev_create(struct platform_device *pdev)
+{
+ int result = 0;
+ dev_t dev = MKDEV(major, 0);
+ struct device *device_handle;
+
+ pr_debug("%s\n", __func__);
+
+ if (major) {
+ result = register_chrdev_region(dev, 1, DEVICE_NAME);
+ } else {
+ result = alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME);
+ major = MAJOR(dev);
+ }
+
+ if (result < 0) {
+ pr_err("%s: Registering msm_audio_anc device failed\n",
+ __func__);
+ goto done;
+ }
+
+ audio_anc.anc_class = class_create(THIS_MODULE, "msm_audio_anc");
+ if (IS_ERR(audio_anc.anc_class)) {
+ result = PTR_ERR(audio_anc.anc_class);
+ pr_err("%s: Error creating anc class: %d\n",
+ __func__, result);
+ goto unregister_chrdev_region;
+ }
+
+ cdev_init(&audio_anc.myc, &audio_anc_fops);
+ result = cdev_add(&audio_anc.myc, dev, 1);
+
+ if (result < 0) {
+ pr_err("%s: Registering file operations failed\n",
+ __func__);
+ goto class_destroy;
+ }
+
+ device_handle = device_create(audio_anc.anc_class,
+ NULL, audio_anc.myc.dev, NULL, "msm_audio_anc");
+ if (IS_ERR(device_handle)) {
+ result = PTR_ERR(device_handle);
+ pr_err("%s: device_create failed: %d\n", __func__, result);
+ goto class_destroy;
+ }
+
+ pr_debug("exit %s\n", __func__);
+ return 0;
+
+class_destroy:
+ class_destroy(audio_anc.anc_class);
+unregister_chrdev_region:
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+done:
+ pr_err("exit %s\n", __func__);
+ return result;
+}
+
+int msm_anc_dev_destroy(struct platform_device *pdev)
+{
+ device_destroy(audio_anc.anc_class, audio_anc.myc.dev);
+ cdev_del(&audio_anc.myc);
+ class_destroy(audio_anc.anc_class);
+ unregister_chrdev_region(MKDEV(major, 0), 1);
+
+ return 0;
+}
+
+static int __init audio_anc_init(void)
+{
+ return msm_anc_dev_init();
+}
+
+static void __exit audio_anc_exit(void)
+{
+ msm_anc_dev_deinit();
+}
+
+module_init(audio_anc_init);
+module_exit(audio_anc_exit);
+
+MODULE_DESCRIPTION("SoC QDSP6v2 Audio ANC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qdsp6v2/lpass_resource_mgr.c b/drivers/soc/qcom/qdsp6v2/lpass_resource_mgr.c
new file mode 100644
index 000000000000..6b097c0205bd
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/lpass_resource_mgr.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/of_device.h>
+#include <linux/sysfs.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6core.h>
+
+#define LPASS_LPAIF_PCM_CTLa(a) (0x1500 + 0x1000 * (a))
+#define LPASS_LPAIF_PCM_CTLa_ELEM 4
+#define LPASS_LPAIF_PCM_CTLa_MAX 3
+#define LPASS_LPAIF_PCM_CTLa__ENABLE_TX___M 0x02000000
+
+#define LPASS_RES_MGR_THREAD_NAME "lpass_resource_mgr_thread"
+
+#define lpass_io_r(a) readl_relaxed(a)
+#define LPASS_REG_OFFSET(_virt_addr_, _phys_addr_) \
+ ((_virt_addr_)-(_phys_addr_))
+
+#define CHECK_EARLY_AUDIO_CMD 0
+#define MAX_TIMEOUT_COUNT 20
+#define LPASS_CHECK_DELAY_MS 1000
+#define LPASS_BOOT_DELAY_MS 2000
+#define LPASS_STATUS_DELAY_MS 500
+
+static ssize_t check_early_audio_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count);
+
+struct lpass_resource_mgr_private {
+ struct kobject *lpass_resource_mgr_obj;
+ struct attribute_group *attr_group;
+ void __iomem *lpaif_mapped_base;
+ struct task_struct *lpass_res_mgr_thread;
+ uint32_t lpass_lpaif_base_addr;
+ uint32_t lpass_lpaif_reg_size;
+ uint32_t lpass_max_rddma;
+ uint32_t lpass_max_wrdma;
+ uint32_t num_reserved_rddma;
+ uint32_t num_reserved_wrdma;
+ uint32_t *reserved_rddma;
+ uint32_t *reserved_wrdma;
+ uint32_t early_audio_pcm_idx;
+ u32 is_early_audio_enabled;
+};
+
+static struct kobj_attribute check_early_audio_attribute =
+ __ATTR(check_early_audio, 0220, NULL, check_early_audio_store);
+
+static struct attribute *attrs[] = {
+ &check_early_audio_attribute.attr,
+ NULL,
+};
+
+static struct lpass_resource_mgr_private *priv;
+
+static struct platform_device *dev_private;
+
+static uint32_t lpass_read_reg(void __iomem *phys_addr, uint32_t virt_offset)
+{
+ uint32_t read_val;
+
+ read_val = lpass_io_r(phys_addr+virt_offset);
+ return read_val;
+}
+
+static void lpass_resource_mgr_check_early_audio(struct platform_device *pdev)
+{
+ if (priv->is_early_audio_enabled)
+ dev_err(&pdev->dev, "%s: Online\n",
+ __func__);
+ else
+ dev_err(&pdev->dev, "%s: Offline\n",
+ __func__);
+}
+
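+/*
+ * Worker thread: if early audio is enabled, poll the LPAIF PCM control
+ * register until the early-audio PCM interface is disabled, then wait
+ * for the ADSP to come up and for all AVCS services to become ready,
+ * and finally request the reserved read/write DMA channels from AFE and
+ * verify the indices that were actually granted.
+ */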
+static int lpass_resource_mgr_thread(void *data)
+{
+ struct platform_device *pdev = dev_private;
+ int i, ret = 0;
+ bool *ret_rddma;
+ bool *ret_wrdma;
+ int total_num_allocated_dma;
+ int timeout_count = 0;
+
+ if (!pdev) {
+		pr_err("%s: Platform device null\n", __func__);
+ goto done;
+ }
+
+ /* Check early audio status if it's enabled */
+ if (priv->is_early_audio_enabled) {
+ int mask, read_val = 0;
+ bool is_check_done = false;
+ int pcm_idx = priv->early_audio_pcm_idx;
+
+ mask = LPASS_LPAIF_PCM_CTLa__ENABLE_TX___M;
+ while (!is_check_done) {
+ if (timeout_count > MAX_TIMEOUT_COUNT) {
+ dev_err(&pdev->dev, "%s: Early audio check TIMED OUT.\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ read_val = lpass_read_reg(priv->lpaif_mapped_base,
+ LPASS_LPAIF_PCM_CTLa(pcm_idx));
+
+ if (!(read_val & mask)) {
+ dev_dbg(&pdev->dev, "%s: PCM interface %d is disabled\n",
+ __func__, pcm_idx);
+ is_check_done = true;
+ } else {
+ dev_dbg_ratelimited(&pdev->dev,
+ "%s: PCM Interface %d enabled\n",
+ __func__, pcm_idx);
+ }
+
+ msleep(LPASS_CHECK_DELAY_MS);
+ timeout_count++;
+ }
+ priv->is_early_audio_enabled = false;
+ }
+
+ total_num_allocated_dma = priv->num_reserved_rddma +
+ priv->num_reserved_wrdma;
+ if (total_num_allocated_dma == 0) {
+ dev_dbg(&pdev->dev, "%s: No DMAs to allocate\n",
+ __func__);
+ goto done;
+ }
+
+ timeout_count = 0;
+ while (apr_get_q6_state() == APR_SUBSYS_DOWN) {
+ if (timeout_count > MAX_TIMEOUT_COUNT) {
+ dev_err(&pdev->dev, "%s: apr_get_q6_state() TIMED OUT.\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ dev_dbg_ratelimited(&pdev->dev, "%s: ADSP is down\n",
+ __func__);
+ msleep(LPASS_BOOT_DELAY_MS);
+ timeout_count++;
+ }
+
+ timeout_count = 0;
+ while (q6core_is_adsp_ready() != AVCS_SERVICE_AND_ALL_MODULES_READY) {
+ if (timeout_count > MAX_TIMEOUT_COUNT) {
+ dev_err(&pdev->dev, "%s: q6core_is_adsp_ready() TIMED OUT.\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ dev_dbg_ratelimited(&pdev->dev,
+ "%s: Not All QADSP6 Services are ready!!\n",
+ __func__);
+ msleep(LPASS_STATUS_DELAY_MS);
+ timeout_count++;
+ }
+
+	/* Allocate the DMA resources, then verify the indices actually granted */
+ ret = afe_request_dma_resources(AFE_LPAIF_DEFAULT_DMA_TYPE,
+ priv->num_reserved_rddma,
+ priv->num_reserved_wrdma);
+
+ if (ret) {
+ dev_err(&pdev->dev, "%s: AFE DMA Request failed with code %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ret = afe_get_dma_idx(&ret_rddma, &ret_wrdma);
+
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Cannot obtain DMA info %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ for (i = 0; i < priv->num_reserved_rddma; i++) {
+ if (ret_rddma[priv->reserved_rddma[i]])
+ break;
+
+ dev_err(&pdev->dev, "%s: ret rddma %d idx no match\n",
+ __func__, priv->reserved_rddma[i]);
+ }
+
+ for (i = 0; i < priv->num_reserved_wrdma; i++) {
+ if (ret_wrdma[priv->reserved_wrdma[i]])
+ break;
+
+ dev_err(&pdev->dev, "%s: ret wrdma %d idx no match\n",
+ __func__, priv->reserved_wrdma[i]);
+ }
+
+done:
+ return ret;
+}
+
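+/* sysfs store handler for check_early_audio: parse a command value and
+ * report whether early audio is still flagged as active.
+ */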
+static ssize_t check_early_audio_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct platform_device *pdev = dev_private;
+ int cmd = 0;
+ int ret = 0;
+
+	if (!pdev) {
+		pr_err("%s: Platform device null\n", __func__);
+		return count;
+	}
+
+	ret = sscanf(buf, "%d", &cmd);
+
+ if (ret != 1) {
+ dev_err(&pdev->dev, "%s: Invalid number of arguments %d\n",
+ __func__, ret);
+ goto store_end;
+ }
+
+ switch (cmd) {
+ case CHECK_EARLY_AUDIO_CMD:
+ lpass_resource_mgr_check_early_audio(dev_private);
+ break;
+ default:
+		dev_err(&pdev->dev, "%s: Unrecognized cmd %d\n",
+ __func__, cmd);
+ break;
+ }
+
+store_end:
+ dev_dbg(&pdev->dev, "%s: Exiting. Count is %d\n",
+ __func__, (int) count);
+ return count;
+}
+
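+/*
+ * Create the lpass_resource_mgr sysfs kobject and attribute group, read
+ * the LPAIF register range, DMA limits and reservation lists and the
+ * early-audio settings from the device tree, map the LPAIF registers
+ * and start the worker thread.
+ */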
+static int lpass_resource_mgr_init_sysfs(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ u32 max_num_pcm_interfaces;
+ u32 lpass_lpaif_vals[2];
+
+ dev_private = NULL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto priv_err_ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->lpass_resource_mgr_obj = NULL;
+ priv->attr_group = devm_kzalloc(&pdev->dev,
+ sizeof(*(priv->attr_group)),
+ GFP_KERNEL);
+ if (!priv->attr_group) {
+ ret = -ENOMEM;
+ goto priv_err_ret;
+ }
+
+ priv->attr_group->attrs = attrs;
+
+ priv->lpass_resource_mgr_obj = kobject_create_and_add(
+ "lpass_resource_mgr", kernel_kobj);
+ if (!priv->lpass_resource_mgr_obj) {
+ dev_err(&pdev->dev, "%s: sysfs create and add failed\n",
+ __func__);
+ ret = -ENOMEM;
+ goto priv_err_ret;
+ }
+
+ ret = sysfs_create_group(priv->lpass_resource_mgr_obj,
+ priv->attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: sysfs create group failed %d\n",
+ __func__, ret);
+ goto lpass_obj_err_ret;
+ }
+
+ dev_private = pdev;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "%s: Device tree information is missing\n",
+ __func__);
+ ret = -ENODATA;
+ goto lpass_obj_err_ret;
+ }
+
+ /* Read Device Tree Information */
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,lpass-lpaif-reg", lpass_lpaif_vals, 2);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Error %d reading lpass-lpaif-reg.\n",
+ __func__, ret);
+ goto lpass_obj_err_ret;
+ }
+
+ priv->lpass_lpaif_base_addr = lpass_lpaif_vals[0];
+ priv->lpass_lpaif_reg_size = lpass_lpaif_vals[1];
+ priv->lpaif_mapped_base = ioremap(priv->lpass_lpaif_base_addr,
+ priv->lpass_lpaif_reg_size);
+ if (!priv->lpaif_mapped_base) {
+ dev_err(&pdev->dev, "%s: Failed to map LPASS LPAIF Base Address 0x%08x\n",
+ __func__, priv->lpass_lpaif_base_addr);
+ ret = -ENOMEM;
+ goto lpass_obj_err_ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,lpass-max-rddma", &priv->lpass_max_rddma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Error %d reading lpass-max-rddma.\n",
+ __func__, ret);
+ goto lpaif_map_err_ret;
+ }
+
+ if (priv->lpass_max_rddma > AFE_MAX_RDDMA) {
+ dev_err(&pdev->dev,
+ "%s: Device tree max RDDMA > kernel max\n",
+ __func__);
+ ret = -EINVAL;
+ goto lpaif_map_err_ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,lpass-max-wrdma", &priv->lpass_max_wrdma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Error %d reading lpass-max-wrdma.\n",
+ __func__, ret);
+ goto lpaif_map_err_ret;
+ }
+
+ if (priv->lpass_max_wrdma > AFE_MAX_WRDMA) {
+ dev_err(&pdev->dev,
+ "%s: Device tree max WRDMA > kernel max\n",
+ __func__);
+ ret = -EINVAL;
+ goto lpaif_map_err_ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-reserved-rddma", &priv->num_reserved_rddma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Error %d reading num-reserved-rddma.\n",
+ __func__, ret);
+ goto lpaif_map_err_ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,num-reserved-wrdma", &priv->num_reserved_wrdma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Error %d reading num-reserved-wrdma.\n",
+ __func__, ret);
+ ret = -EINVAL;
+ goto lpaif_map_err_ret;
+ }
+
+ if ((priv->num_reserved_rddma > priv->lpass_max_rddma) ||
+ (priv->num_reserved_wrdma > priv->lpass_max_wrdma)) {
+ dev_err(&pdev->dev,
+ "%s: Reserved DMA greater than max\n",
+ __func__);
+ ret = -EINVAL;
+ goto lpaif_map_err_ret;
+ }
+
+ if (priv->num_reserved_rddma > 0) {
+ priv->reserved_rddma = devm_kcalloc(&pdev->dev,
+ priv->num_reserved_rddma,
+ sizeof(uint32_t),
+ GFP_KERNEL);
+
+ if (!priv->reserved_rddma) {
+ ret = -ENOMEM;
+ goto lpaif_map_err_ret;
+ }
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,reserved-rddma", priv->reserved_rddma,
+ priv->num_reserved_rddma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Error %d reading reserved-rddma.\n",
+ __func__, ret);
+ goto lpaif_map_err_ret;
+ }
+ }
+
+ if (priv->num_reserved_wrdma > 0) {
+ priv->reserved_wrdma = devm_kcalloc(&pdev->dev,
+ priv->num_reserved_wrdma,
+ sizeof(uint32_t),
+ GFP_KERNEL);
+
+ if (!priv->reserved_wrdma) {
+ ret = -ENOMEM;
+ goto lpaif_map_err_ret;
+ }
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,reserved-wrdma", priv->reserved_wrdma,
+ priv->num_reserved_wrdma);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: Error %d reading reserved-wrdma.\n",
+ __func__, ret);
+ goto lpaif_map_err_ret;
+ }
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,early-audio-enabled", &priv->is_early_audio_enabled);
+ if (ret) {
+ dev_dbg(&pdev->dev,
+ "%s: Error %d reading early-audio-enabled\n",
+ __func__, ret);
+ priv->is_early_audio_enabled = 0;
+ }
+
+ if (priv->is_early_audio_enabled) {
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-num-pcm-intf", &max_num_pcm_interfaces);
+ if (ret)
+ dev_err(&pdev->dev,
+ "%s: Error %d reading max-num-pcm-intf\n",
+ __func__, ret);
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,early-audio-pcm", &priv->early_audio_pcm_idx);
+ if (ret)
+ dev_err(&pdev->dev,
+ "%s: Error %d reading early-audio-pcm\n",
+ __func__, ret);
+ }
+
+ priv->lpass_res_mgr_thread = kthread_run(
+ lpass_resource_mgr_thread,
+ NULL,
+ LPASS_RES_MGR_THREAD_NAME);
+
+ return 0;
+lpaif_map_err_ret:
+ if (priv->lpaif_mapped_base)
+ iounmap(priv->lpaif_mapped_base);
+
+lpass_obj_err_ret:
+ if (priv->lpass_resource_mgr_obj) {
+ kobject_del(priv->lpass_resource_mgr_obj);
+ priv->lpass_resource_mgr_obj = NULL;
+ }
+
+priv_err_ret:
+ return ret;
+}
+
+static int lpass_resource_mgr_remove(struct platform_device *pdev)
+{
+ struct lpass_resource_mgr_private *priv = NULL;
+
+ priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ if (priv->lpaif_mapped_base)
+ iounmap(priv->lpaif_mapped_base);
+
+ if (priv->lpass_resource_mgr_obj) {
+ sysfs_remove_group(priv->lpass_resource_mgr_obj,
+ priv->attr_group);
+
+ kobject_del(priv->lpass_resource_mgr_obj);
+ priv->lpass_resource_mgr_obj = NULL;
+ }
+
+ kthread_stop(priv->lpass_res_mgr_thread);
+ afe_release_all_dma_resources();
+ return 0;
+}
+
+static int lpass_resource_mgr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = lpass_resource_mgr_init_sysfs(pdev);
+
+ if (ret != 0) {
+ dev_err(&pdev->dev, "%s: Error in initing sysfs\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id lpass_resource_mgr_dt_match[] = {
+ { .compatible = "qcom,lpass-resource-manager" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_resource_mgr_dt_match);
+
+static struct platform_driver lpass_resource_mgr_driver = {
+ .driver = {
+ .name = "lpass-resource-manager",
+ .owner = THIS_MODULE,
+ .of_match_table = lpass_resource_mgr_dt_match,
+ },
+ .probe = lpass_resource_mgr_probe,
+ .remove = lpass_resource_mgr_remove,
+};
+
+static int __init lpass_resource_mgr_init(void)
+{
+ return platform_driver_register(&lpass_resource_mgr_driver);
+}
+module_init(lpass_resource_mgr_init);
+
+static void __exit lpass_resource_mgr_exit(void)
+{
+ platform_driver_unregister(&lpass_resource_mgr_driver);
+}
+module_exit(lpass_resource_mgr_exit);
+
+MODULE_DESCRIPTION("LPASS Resource Manager module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 9e61ff1ebfcc..c0aeb8712b4f 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -827,6 +827,11 @@ static int msm_audio_smmu_init_legacy(struct device *dev)
return -EINVAL;
}
msm_audio_ion_data.cb_dev = msm_iommu_get_ctx(cb->name);
+ if (msm_audio_ion_data.cb_dev == NULL) {
+ dev_err(dev, "%s Could not find IOMMU context\n",
+ __func__);
+ return -EINVAL;
+ }
cb->addr_range.start = (dma_addr_t) read_val[0];
cb->addr_range.size = (size_t) read_val[1];
dev_dbg(dev, "%s Legacy iommu usage\n", __func__);
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
index 7ef16ad5575b..15c3e7e42c6d 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
@@ -83,6 +83,7 @@ static int msm_audio_ion_smmu_map(struct ion_client *client,
struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp;
struct msm_audio_smmu_map_data *map_data = NULL;
struct msm_audio_smmu_vm_map_cmd smmu_map_cmd;
+ unsigned long delay = jiffies + (HZ / 2);
rc = ion_handle_get_size(client, handle, len);
if (rc) {
@@ -122,12 +123,15 @@ static int msm_audio_ion_smmu_map(struct ion_client *client,
goto err;
}
- cmd_rsp_size = sizeof(cmd_rsp);
- rc = habmm_socket_recv(msm_audio_ion_hab_handle,
- (void *)&cmd_rsp,
- &cmd_rsp_size,
- 0xFFFFFFFF,
- 0);
+ do {
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ } while (time_before(jiffies, delay) && (rc == -EAGAIN) &&
+ (cmd_rsp_size == 0));
if (rc) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, rc);
@@ -181,6 +185,7 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client,
struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp;
struct msm_audio_smmu_map_data *map_data, *next;
struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd;
+ unsigned long delay = jiffies + (HZ / 2);
/*
* Though list_for_each_entry_safe is delete safe, lock
@@ -205,12 +210,15 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client,
goto err;
}
- cmd_rsp_size = sizeof(cmd_rsp);
- rc = habmm_socket_recv(msm_audio_ion_hab_handle,
- (void *)&cmd_rsp,
- &cmd_rsp_size,
- 0xFFFFFFFF,
- 0);
+ do {
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ } while (time_before(jiffies, delay) &&
+ (rc == -EAGAIN) && (cmd_rsp_size == 0));
if (rc) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, rc);
diff --git a/drivers/soc/qcom/qdsp6v2/sdsp-anc.c b/drivers/soc/qcom/qdsp6v2/sdsp-anc.c
new file mode 100644
index 000000000000..9294485f7ff2
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/sdsp-anc.c
@@ -0,0 +1,801 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/delay.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/q6afe-v2.h>
+#include <sound/q6audio-v2.h>
+#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
+#include <linux/qdsp6v2/apr_tal.h>
+
+#include <linux/qdsp6v2/sdsp_anc.h>
+
+#define TIMEOUT_MS 1000
+
+struct anc_if_ctl {
+ void *apr;
+ atomic_t state;
+ atomic_t status;
+ wait_queue_head_t wait[AFE_MAX_PORTS];
+ struct task_struct *task;
+ struct anc_get_rpm_resp rpm_calib_data;
+ uint32_t mmap_handle;
+ struct mutex afe_cmd_lock;
+};
+
+static struct anc_if_ctl this_anc_if;
+
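+/* Handle an ANC algo RPM GET_PARAM response: cache the calibration data
+ * and clear the wait state so the blocked caller can proceed.
+ */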
+static int32_t anc_get_param_callback(uint32_t *payload,
+ uint32_t payload_size)
+{
+ u32 param_id;
+ struct anc_get_rpm_resp *resp =
+ (struct anc_get_rpm_resp *) payload;
+
+	if (!payload) {
+		pr_err("%s: Error: payload is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ param_id = resp->pdata.param_id;
+ if (param_id == AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM) {
+ if (payload_size < sizeof(this_anc_if.rpm_calib_data)) {
+ pr_err("%s: Error: received size %d, calib_data size %zu\n",
+ __func__, payload_size,
+ sizeof(this_anc_if.rpm_calib_data));
+ return -EINVAL;
+ }
+
+ memcpy(&this_anc_if.rpm_calib_data, payload,
+ sizeof(this_anc_if.rpm_calib_data));
+ if (!this_anc_if.rpm_calib_data.status) {
+ atomic_set(&this_anc_if.state, 0);
+ } else {
+ pr_debug("%s: calib resp status: %d", __func__,
+ this_anc_if.rpm_calib_data.status);
+ atomic_set(&this_anc_if.state, -1);
+ }
+ }
+
+ return 0;
+}
+
+static void anc_if_callback_debug_print(struct apr_client_data *data)
+{
+ uint32_t *payload;
+
+ payload = data->payload;
+
+ if (data->payload_size >= 8)
+ pr_debug("%s: code = 0x%x PL#0[0x%x], PL#1[0x%x], size = %d\n",
+ __func__, data->opcode, payload[0], payload[1],
+ data->payload_size);
+ else if (data->payload_size >= 4)
+ pr_debug("%s: code = 0x%x PL#0[0x%x], size = %d\n",
+ __func__, data->opcode, payload[0],
+ data->payload_size);
+ else
+ pr_debug("%s: code = 0x%x, size = %d\n",
+ __func__, data->opcode, data->payload_size);
+}
+
+static int32_t anc_if_callback(struct apr_client_data *data, void *priv)
+{
+ if (!data) {
+ pr_err("%s: Invalid param data\n", __func__);
+ return -EINVAL;
+ }
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("%s: reset event = %d %d apr[%pK]\n",
+ __func__,
+ data->reset_event, data->reset_proc, this_anc_if.apr);
+
+ if (this_anc_if.apr) {
+ apr_reset(this_anc_if.apr);
+ atomic_set(&this_anc_if.state, 0);
+ this_anc_if.apr = NULL;
+ }
+
+ return 0;
+ }
+ anc_if_callback_debug_print(data);
+ if (data->opcode == AFE_PORT_CMDRSP_GET_PARAM_V2) {
+ u8 *payload = data->payload;
+
+ if (!payload || (data->token >= AFE_MAX_PORTS)) {
+ pr_err("%s: Error: size %d payload %pK token %d\n",
+ __func__, data->payload_size,
+ payload, data->token);
+ return -EINVAL;
+ }
+
+ if (anc_get_param_callback(data->payload, data->payload_size))
+ return -EINVAL;
+
+ wake_up(&this_anc_if.wait[data->token]);
+
+ } else if (data->payload_size) {
+ uint32_t *payload;
+
+ payload = data->payload;
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ pr_debug("%s:opcode = 0x%x cmd = 0x%x status = 0x%x token=%d\n",
+ __func__, data->opcode,
+ payload[0], payload[1], data->token);
+ /* payload[1] contains the error status for response */
+ if (payload[1] != 0) {
+ atomic_set(&this_anc_if.status, payload[1]);
+ pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
+ __func__, payload[0], payload[1]);
+ }
+ switch (payload[0]) {
+ case AFE_PORT_CMD_SET_PARAM_V2:
+ case AFE_PORT_CMD_DEVICE_STOP:
+ case AFE_PORT_CMD_DEVICE_START:
+ case AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS:
+ case AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS:
+ case AFE_SVC_CMD_SET_PARAM:
+ atomic_set(&this_anc_if.state, 0);
+ wake_up(&this_anc_if.wait[data->token]);
+ break;
+ default:
+ pr_err("%s: Unknown cmd 0x%x\n", __func__,
+ payload[0]);
+ break;
+ }
+ } else if (data->opcode ==
+ AFE_SERVICE_CMDRSP_SHARED_MEM_MAP_REGIONS) {
+			pr_debug("%s: ANC mmap_handle: 0x%x\n",
+ __func__, payload[0]);
+ this_anc_if.mmap_handle = payload[0];
+ atomic_set(&this_anc_if.state, 0);
+ wake_up(&this_anc_if.wait[data->token]);
+ }
+ }
+ return 0;
+}
+
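+/* Register an APR client with the SDSP audio service on first use. */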
+int anc_sdsp_interface_prepare(void)
+{
+ int ret = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_anc_if.apr == NULL) {
+ this_anc_if.apr = apr_register("SDSP", "MAS", anc_if_callback,
+ 0xFFFFFFFF, &this_anc_if);
+ if (this_anc_if.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ }
+ }
+ return ret;
+}
+
+/*
+ * anc_if_apr_send_pkt : returns 0 on success, negative otherwise.
+ */
+static int anc_if_apr_send_pkt(void *data, wait_queue_head_t *wait)
+{
+ int ret;
+
+ if (wait)
+ atomic_set(&this_anc_if.state, 1);
+ atomic_set(&this_anc_if.status, 0);
+ ret = apr_send_pkt(this_anc_if.apr, data);
+ if (ret > 0) {
+ if (wait) {
+ ret = wait_event_timeout(*wait,
+ (atomic_read(&this_anc_if.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ } else if (atomic_read(&this_anc_if.status) > 0) {
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(atomic_read(
+ &this_anc_if.status)));
+ ret = adsp_err_get_lnx_err_code(
+ atomic_read(&this_anc_if.status));
+ } else {
+ ret = 0;
+ }
+ } else {
+ ret = 0;
+ }
+ } else if (ret == 0) {
+ pr_err("%s: packet not transmitted\n", __func__);
+ /* apr_send_pkt can return 0 when nothing is transmitted */
+ ret = -EINVAL;
+ }
+
+ pr_debug("%s: leave %d\n", __func__, ret);
+ return ret;
+}
+
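+/* Build an AFE_PORT_CMD_DEVICE_START packet for the port and wait for
+ * the response on the per-port wait queue.
+ */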
+static int anc_if_send_cmd_port_start(u16 port_id)
+{
+ struct afe_port_cmd_device_start start;
+ int ret, index;
+
+ pr_debug("%s: enter\n", __func__);
+ index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+ pr_err("%s: port id: 0x%x ret %d\n", __func__, port_id, ret);
+ return -EINVAL;
+ }
+
+ start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ start.hdr.pkt_size = sizeof(start);
+ start.hdr.src_port = 0;
+ start.hdr.dest_port = 0;
+ start.hdr.token = index;
+ start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
+ start.port_id = q6audio_get_port_id(port_id);
+ pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+ __func__, start.hdr.opcode, start.port_id);
+
+ ret = anc_if_apr_send_pkt(&start, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: AFE enable for port 0x%x failed %d\n", __func__,
+ port_id, ret);
+ } else if (this_anc_if.task != current) {
+ this_anc_if.task = current;
+ pr_debug("task_name = %s pid = %d\n",
+ this_anc_if.task->comm, this_anc_if.task->pid);
+ }
+
+ return ret;
+}
+
+int anc_if_send_cmd_port_stop(int port_id)
+{
+ struct afe_port_cmd_device_stop stop;
+ int ret = 0;
+
+ if (this_anc_if.apr == NULL) {
+ pr_err("%s: AFE is already closed\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ pr_debug("%s: port_id = 0x%x\n", __func__, port_id);
+ port_id = q6audio_convert_virtual_to_portid(port_id);
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = 0;
+ stop.hdr.opcode = AFE_PORT_CMD_DEVICE_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ ret = anc_if_apr_send_pkt(&stop, NULL);
+ if (ret)
+ pr_err("%s: AFE close failed %d\n", __func__, ret);
+
+fail_cmd:
+ return ret;
+
+}
+
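+/* Configure the ANC reference capture path: send a DEV_ANC_REFS_CONFIG
+ * set-param carrying the port ID, sample rate, bit width and channels.
+ */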
+int anc_if_config_ref(u16 port_id, u32 sample_rate,
+ u32 bit_width, u16 num_channel)
+{
+ struct anc_config_ref_command config;
+ int ret = 0;
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_REFS;
+ config.pdata.param_id = AUD_MSVC_PARAM_ID_DEV_ANC_REFS_CONFIG;
+ config.pdata.param_size = sizeof(config.refs);
+ config.refs.minor_version = AUD_MSVC_API_VERSION_DEV_ANC_REFS_CONFIG;
+ config.refs.port_id = q6audio_get_port_id(port_id);
+ config.refs.sample_rate = sample_rate;
+ config.refs.bit_width = bit_width;
+ config.refs.num_channel = num_channel;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: anc_if_config_ref for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+		pr_err("%s: anc_if_config_ref size of param is %zu\n",
+ __func__, sizeof(config.refs));
+ }
+
+ return ret;
+}
+
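+/* Hand the reserved RDDMA/WRDMA indices and the LPM memory window
+ * (start address and length) to the DSP via SHARE_RESOURCE_CONFIG.
+ */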
+int anc_if_share_resource(u16 port_id, u16 rddma_idx, u16 wrdma_idx,
+ u32 lpm_start_addr, u32 lpm_length)
+{
+ struct anc_share_resource_command config;
+ int ret = 0;
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_RESOURCE_SHARE;
+ config.pdata.param_id = AUD_MSVC_PARAM_ID_PORT_SHARE_RESOURCE_CONFIG;
+ config.pdata.param_size = sizeof(config.resource);
+ config.resource.minor_version =
+ AUD_MSVC_API_VERSION_SHARE_RESOURCE_CONFIG;
+ config.resource.rddma_idx = rddma_idx;
+ config.resource.wrdma_idx = wrdma_idx;
+ config.resource.lpm_start_addr = lpm_start_addr;
+ config.resource.lpm_length = lpm_length;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+
+ return ret;
+}
+
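+/* Apply the AFE TDM configuration to the port, then issue the device
+ * start command.
+ */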
+int anc_if_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port)
+{
+ struct aud_audioif_config_command config;
+ int ret = 0;
+ int index = 0;
+
+ if (!tdm_port) {
+ pr_err("%s: Error, no configuration data\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: port id: 0x%x\n", __func__, port_id);
+
+ index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ config.pdata.param_id = AFE_PARAM_ID_TDM_CONFIG;
+ config.pdata.param_size = sizeof(config.port);
+ config.port.tdm = tdm_port->tdm;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: AFE enable for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ goto fail_cmd;
+ }
+
+ ret = anc_if_send_cmd_port_start(port_id);
+
+fail_cmd:
+ return ret;
+}
+
+int anc_if_tdm_port_stop(u16 port_id)
+{
+ return anc_if_send_cmd_port_stop(port_id);
+}
+
+int anc_if_set_rpm(u16 port_id, u32 rpm)
+{
+ int ret = 0;
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_rpm_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id = AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM;
+ config.pdata.param_size = sizeof(config.set_rpm);
+ config.set_rpm.minor_version =
+ AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM;
+ config.set_rpm.rpm = rpm;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+			pr_err("%s: set rpm for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
+int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode)
+{
+ int ret = 0;
+
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_bypass_mode_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id =
+ AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE;
+ config.pdata.param_size = sizeof(config.set_bypass_mode);
+ config.set_bypass_mode.minor_version =
+ AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE;
+ config.set_bypass_mode.bypass_mode = bypass_mode;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+			pr_err("%s: set bypass mode for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
+int anc_if_set_algo_module_id(u16 port_id, u32 module_id)
+{
+ int ret = 0;
+
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_algo_module_id_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id =
+ AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID;
+ config.pdata.param_size = sizeof(config.set_algo_module_id);
+ config.set_algo_module_id.minor_version = 1;
+ config.set_algo_module_id.module_id = module_id;
+
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+ pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
+int anc_if_set_anc_mic_spkr_layout(u16 port_id,
+struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p)
+{
+ int ret = 0;
+
+ int index;
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+
+ {
+ struct anc_set_mic_spkr_layout_info_command config;
+
+ memset(&config, 0, sizeof(config));
+ config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ config.hdr.pkt_size = sizeof(config);
+ config.hdr.src_port = 0;
+ config.hdr.dest_port = 0;
+ config.hdr.token = index;
+ config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) -
+ sizeof(struct apr_hdr) -
+ sizeof(config.param);
+ config.param.payload_address_lsw = 0x00;
+ config.param.payload_address_msw = 0x00;
+ config.param.mem_map_handle = 0x00;
+ config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.param_id =
+ AUD_MSVC_PARAM_ID_PORT_ANC_MIC_SPKR_LAYOUT_INFO;
+ config.pdata.param_size = sizeof(config.set_mic_spkr_layout);
+
+ memcpy(&config.set_mic_spkr_layout, set_mic_spkr_layout_p,
+ sizeof(config.set_mic_spkr_layout));
+ ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
+ if (ret) {
+			pr_err("%s: anc mic spkr layout for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
+ }
+ }
+
+ return ret;
+}
+
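+/* Map a DMA buffer with the DSP: build a single-region
+ * SHARED_MEM_MAP_REGIONS command and wait for the map handle to be
+ * returned through the APR callback.
+ */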
+int anc_if_cmd_memory_map(int port_id, phys_addr_t dma_addr_p,
+ u32 dma_buf_sz)
+{
+ int ret = 0;
+ int cmd_size = 0;
+ void *payload = NULL;
+ void *mmap_region_cmd = NULL;
+ struct afe_service_cmd_shared_mem_map_regions *mregion = NULL;
+ struct afe_service_shared_map_region_payload *mregion_pl = NULL;
+ int index = 0;
+
+ pr_debug("%s:\n", __func__);
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d\n",
+ __func__, port_id, ret);
+ return -EINVAL;
+ }
+
+ cmd_size = sizeof(struct afe_service_cmd_shared_mem_map_regions)
+ + sizeof(struct afe_service_shared_map_region_payload);
+
+ mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!mmap_region_cmd) {
+ ret = -ENOMEM;
+ pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+ return ret;
+ }
+
+ mregion = (struct afe_service_cmd_shared_mem_map_regions *)
+ mmap_region_cmd;
+ mregion->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion->hdr.pkt_size = cmd_size;
+ mregion->hdr.src_port = 0;
+ mregion->hdr.dest_port = 0;
+ mregion->hdr.token = index;
+ mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
+ mregion->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+ mregion->num_regions = 1;
+ mregion->property_flag = 0x00;
+
+ payload = ((u8 *) mmap_region_cmd +
+ sizeof(struct afe_service_cmd_shared_mem_map_regions));
+ mregion_pl = (struct afe_service_shared_map_region_payload *)payload;
+
+ mregion_pl->shm_addr_lsw = lower_32_bits(dma_addr_p);
+ mregion_pl->shm_addr_msw = msm_audio_populate_upper_32_bits(dma_addr_p);
+ mregion_pl->mem_size_bytes = dma_buf_sz;
+
+ ret = anc_if_apr_send_pkt(mmap_region_cmd, &this_anc_if.wait[index]);
+ if (ret)
+ pr_err("%s: AFE memory map cmd failed %d\n",
+ __func__, ret);
+ kfree(mmap_region_cmd);
+ return ret;
+}
+
+int anc_if_cmd_memory_unmap(int port_id, u32 mem_map_handle)
+{
+ int ret = 0;
+ struct afe_service_cmd_shared_mem_unmap_regions mregion;
+ int index = 0;
+
+ pr_debug("%s: handle 0x%x\n", __func__, mem_map_handle);
+
+ ret = anc_sdsp_interface_prepare();
+ if (ret != 0) {
+ pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ index = q6audio_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: AFE port index[%d] invalid!\n",
+ __func__, index);
+ return -EINVAL;
+ }
+ ret = q6audio_validate_port(port_id);
+ if (ret < 0) {
+		pr_err("%s: Invalid port 0x%x ret %d\n",
+ __func__, port_id, ret);
+ return -EINVAL;
+ }
+
+ mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion.hdr.pkt_size = sizeof(mregion);
+ mregion.hdr.src_port = 0;
+ mregion.hdr.dest_port = 0;
+ mregion.hdr.token = index;
+ mregion.hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_UNMAP_REGIONS;
+ mregion.mem_map_handle = mem_map_handle;
+
+ ret = anc_if_apr_send_pkt(&mregion, &this_anc_if.wait[index]);
+ if (ret)
+ pr_err("%s: msvc memory unmap cmd failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __init sdsp_anc_init(void)
+{
+ int i = 0, ret = 0;
+
+ atomic_set(&this_anc_if.state, 0);
+ atomic_set(&this_anc_if.status, 0);
+ this_anc_if.apr = NULL;
+ this_anc_if.mmap_handle = 0;
+ mutex_init(&this_anc_if.afe_cmd_lock);
+ for (i = 0; i < AFE_MAX_PORTS; i++)
+ init_waitqueue_head(&this_anc_if.wait[i]);
+
+ return ret;
+}
+
+static void __exit sdsp_anc_exit(void)
+{
+ mutex_destroy(&this_anc_if.afe_cmd_lock);
+}
+
+device_initcall(sdsp_anc_init);
+__exitcall(sdsp_anc_exit);
diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c
index c560ec7d7401..f01ab2499a75 100644
--- a/drivers/soc/qcom/qdsp6v2/voice_svc.c
+++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -773,7 +773,7 @@ static int voice_svc_probe(struct platform_device *pdev)
if (ret) {
pr_err("%s: Failed to alloc chrdev\n", __func__);
ret = -ENODEV;
- goto chrdev_err;
+ goto done;
}
voice_svc_dev->major = MAJOR(device_num);
@@ -820,8 +820,6 @@ dev_err:
class_destroy(voice_svc_class);
class_err:
unregister_chrdev_region(0, MINOR_NUMBER);
-chrdev_err:
- kfree(voice_svc_dev);
done:
return ret;
}
@@ -835,7 +833,6 @@ static int voice_svc_remove(struct platform_device *pdev)
device_destroy(voice_svc_class, device_num);
class_destroy(voice_svc_class);
unregister_chrdev_region(0, MINOR_NUMBER);
- kfree(voice_svc_dev);
return 0;
}
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
index b54af9eae8ec..ed7493d063ae 100644
--- a/drivers/soc/qcom/rpm_stats.c
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -430,7 +430,7 @@ static ssize_t rpmstats_show(struct kobject *kobj,
prvdata);
}
- ret = snprintf(buf, prvdata->len, prvdata->buf);
+ ret = snprintf(buf, prvdata->len, "%s", prvdata->buf);
iounmap(prvdata->reg_base);
ioremap_fail:
kfree(prvdata);
diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c
index 3f2b05a0ec9e..614670888aac 100644
--- a/drivers/soc/qcom/scm_qcpe.c
+++ b/drivers/soc/qcom/scm_qcpe.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) "QSEECOM: %s:%d : " fmt, __func__, __LINE__
+
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -29,6 +31,12 @@
#include <linux/habmm.h>
+#ifdef CONFIG_GHS_VMM
+#include <../../staging/android/ion/ion_hvenv_driver.h>
+#include <linux/msm_ion.h>
+#include <soc/qcom/qseecomi.h>
+#endif
+
#define SCM_ENOMEM (-5)
#define SCM_EOPNOTSUPP (-4)
#define SCM_EINVAL_ADDR (-3)
@@ -67,7 +75,8 @@ DEFINE_MUTEX(scm_lmh_lock);
else \
result = x + y; \
result; \
- })
+})
+
/**
* struct scm_command - one SCM command buffer
* @len: total available memory for command and response
@@ -112,6 +121,19 @@ struct scm_response {
u32 is_complete;
};
+struct scm_extra_arg {
+ union {
+ u32 args32[N_EXT_SCM_ARGS];
+ u64 args64[N_EXT_SCM_ARGS];
+ };
+};
+
+struct smc_params_s {
+ uint64_t fn_id;
+ uint64_t arginfo;
+ uint64_t args[MAX_SCM_ARGS];
+} __packed;
+
#ifdef CONFIG_ARM64
#define R0_STR "x0"
@@ -140,6 +162,16 @@ struct scm_response {
#endif
+static enum scm_interface_version {
+ SCM_UNKNOWN,
+ SCM_LEGACY,
+ SCM_ARMV8_32,
+ SCM_ARMV8_64,
+} scm_version = SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 scm_version_mask;
+
/**
* scm_command_to_response() - Get a pointer to a scm_response
* @cmd: command
@@ -194,71 +226,289 @@ static int scm_remap_error(int err)
return -EINVAL;
}
+#ifdef CONFIG_GHS_VMM
+enum SCM_QCPE_IONIZE {
+ /* args[0] - physical addr, args[1] - length */
+ IONIZE_IDX_0,
+
+ /* args[1] - physical addr, args[2] - length */
+ IONIZE_IDX_1,
+
+ /* args[0] - physical addr, args[1] - length */
+ /* args[2] - physical addr, args[3] - length */
+ IONIZE_IDX_0_2,
+
+ /* args[2] - physical addr, args[3] - length */
+ IONIZE_IDX_2,
+
+ /* args[5] - physical addr, args[6] - length */
+ IONIZE_IDX_5
+};
+
+static struct ion_client *ion_clnt;
+
+static int scm_ion_alloc(size_t len, void **vaddr,
+ ion_phys_addr_t *paddr, struct ion_handle **ihandle)
+{
+ struct ion_handle *ihndl = NULL;
+ void *mvaddr;
+ ion_phys_addr_t mpaddr;
+ int ret = 0;
+
+ if (!ion_clnt) {
+ ion_clnt = hvenv_ion_client_create("qseecom-kernel");
+ if (IS_ERR_OR_NULL(ion_clnt)) {
+ pr_err("Ion client cannot be created\n");
+ return SCM_ENOMEM;
+ }
+ }
+
+ ihndl = ion_alloc(ion_clnt, len,
+ SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(ihndl)) {
+ pr_err("ION alloc failed\n");
+ return SCM_ENOMEM;
+ }
+
+ mvaddr = ion_map_kernel(ion_clnt, ihndl);
+ if (IS_ERR_OR_NULL(mvaddr)) {
+ pr_err("ION memory mapping for image loading failed\n");
+ ret = SCM_ENOMEM;
+ goto free_ion;
+ }
+
+ ret = ion_phys(ion_clnt, ihndl, &mpaddr, &len);
+ if (ret) {
+ pr_err("physical memory retrieval failure\n");
+ ret = SCM_ENOMEM;
+ goto unmap_ion;
+
+ }
+
+ *vaddr = mvaddr;
+ *paddr = mpaddr;
+ *ihandle = ihndl;
+ return ret;
+
+unmap_ion:
+ ion_unmap_kernel(ion_clnt, ihndl);
+free_ion:
+ ion_free(ion_clnt, ihndl);
+ return ret;
+}
+
+static int scm_ionize(enum SCM_QCPE_IONIZE idx,
+ u64 *args, struct ion_handle **ihandle)
+{
+ ion_phys_addr_t ion_paddr;
+ void *krn_vaddr;
+ void *ion_vaddr;
+ size_t len, len1;
+ struct ion_handle *ihndl = NULL;
+ int ret = 0;
+
+ switch (idx) {
+ case IONIZE_IDX_0:
+ len = (size_t)args[1];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[0]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[0] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_1:
+ len = (size_t)args[2];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[1]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[1] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_0_2:
+ len = (size_t)args[1] + (size_t)args[3];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[0]);
+ len = (size_t)args[1];
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[0] = ion_paddr;
+
+ krn_vaddr = phys_to_virt(args[2]);
+ len1 = (size_t)args[3];
+ memcpy((uint8_t *)ion_vaddr + len, krn_vaddr, len1);
+ args[2] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_2:
+ len = (size_t)args[3];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[2]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[2] = ion_paddr;
+ break;
+
+ case IONIZE_IDX_5:
+ len = (size_t)args[6];
+ ret = scm_ion_alloc(len, &ion_vaddr, &ion_paddr, &ihndl);
+ if (ret)
+ break;
+ krn_vaddr = phys_to_virt(args[5]);
+ memcpy(ion_vaddr, krn_vaddr, len);
+ args[5] = ion_paddr;
+ break;
+ default:
+ break;
+ }
+ *ihandle = ihndl;
+ return ret;
+}
+
+static int ionize_buffers(u32 fn_id,
+ struct smc_params_s *desc, struct ion_handle **ihandle)
+{
+ struct ion_handle *ihndl = NULL;
+ int ret = 0;
+
+ switch (fn_id) {
+ case TZ_OS_APP_LOOKUP_ID:
+ case TZ_OS_KS_GEN_KEY_ID:
+ case TZ_OS_KS_DEL_KEY_ID:
+ case TZ_OS_KS_SET_PIPE_KEY_ID:
+ case TZ_OS_KS_UPDATE_KEY_ID:
+ ret = scm_ionize(IONIZE_IDX_0, desc->args, &ihndl);
+ break;
+
+ case TZ_ES_SAVE_PARTITION_HASH_ID:
+ ret = scm_ionize(IONIZE_IDX_1, desc->args, &ihndl);
+ break;
+
+ case TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID:
+ ret = scm_ionize(IONIZE_IDX_2, desc->args, &ihndl);
+ break;
+
+ case TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID:
+ case TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID:
+ case TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID:
+ ret = scm_ionize(IONIZE_IDX_5, desc->args, &ihndl);
+ break;
+ default:
+ break;
+ }
+ *ihandle = ihndl;
+ return ret;
+}
+
+static void free_ion_buffers(struct ion_handle *ihandle)
+{
+ ion_free(ion_clnt, ihandle);
+}
+#endif
+
static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
{
static bool opened;
static u32 handle;
- u32 ret;
u32 size_bytes;
+ struct smc_params_s smc_params = {0,};
+ int ret;
+#ifdef CONFIG_GHS_VMM
+ int i;
+ uint64_t arglen = desc->arginfo & 0xf;
+ struct ion_handle *ihandle = NULL;
+#endif
- struct smc_params_s {
- uint64_t x0;
- uint64_t x1;
- uint64_t x2;
- uint64_t x3;
- uint64_t x4;
- uint64_t x5;
- uint64_t sid;
- } smc_params;
-
- pr_info("scm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx",
- fn_id, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->args[3], desc->args[4],
- desc->args[5], desc->args[6]);
+ pr_info("IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
+ fn_id, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->args[3], desc->x5);
if (!opened) {
ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0);
if (ret) {
- pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d",
- ret);
+ pr_err("habmm_socket_open failed with ret = %d\n", ret);
return ret;
}
opened = true;
}
- smc_params.x0 = fn_id | 0x40000000; /* SMC64_MASK */
- smc_params.x1 = desc->arginfo;
- smc_params.x2 = desc->args[0];
- smc_params.x3 = desc->args[1];
- smc_params.x4 = desc->args[2];
- smc_params.x5 = desc->x5;
- smc_params.sid = 0;
+ smc_params.fn_id = fn_id | scm_version_mask;
+ smc_params.arginfo = desc->arginfo;
+ smc_params.args[0] = desc->args[0];
+ smc_params.args[1] = desc->args[1];
+ smc_params.args[2] = desc->args[2];
- ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0);
+#ifdef CONFIG_GHS_VMM
+ if (arglen <= N_REGISTER_ARGS) {
+ smc_params.args[FIRST_EXT_ARG_IDX] = desc->x5;
+ } else {
+ struct scm_extra_arg *argbuf =
+ (struct scm_extra_arg *)desc->extra_arg_buf;
+ int j = 0;
+
+ if (scm_version == SCM_ARMV8_64)
+ for (i = FIRST_EXT_ARG_IDX; i < MAX_SCM_ARGS; i++)
+ smc_params.args[i] = argbuf->args64[j++];
+ else
+ for (i = FIRST_EXT_ARG_IDX; i < MAX_SCM_ARGS; i++)
+ smc_params.args[i] = argbuf->args32[j++];
+ }
+
+ ret = ionize_buffers(fn_id & (~SMC64_MASK), &smc_params, &ihandle);
if (ret)
return ret;
+#else
+ smc_params.args[3] = desc->x5;
+ smc_params.args[4] = 0;
+#endif
+
+ ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0);
+ if (ret) {
+ pr_err("habmm_socket_send failed, ret= 0x%x\n", ret);
+ goto err_ret;
+ }
size_bytes = sizeof(smc_params);
memset(&smc_params, 0x0, sizeof(smc_params));
- ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0);
- if (ret)
- return ret;
+ ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0,
+ HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
+ if (ret) {
+ pr_err("habmm_socket_recv failed, ret= 0x%x\n", ret);
+ goto err_ret;
+ }
if (size_bytes != sizeof(smc_params)) {
- pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n",
- sizeof(smc_params), size_bytes);
- return SCM_ERROR;
+ pr_err("habmm_socket_recv expected size: %lu, actual=%u\n",
+ sizeof(smc_params),
+ size_bytes);
+ ret = SCM_ERROR;
+ goto err_ret;
}
- desc->ret[0] = smc_params.x1;
- desc->ret[1] = smc_params.x2;
- desc->ret[2] = smc_params.x3;
-
- pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx",
- smc_params.x0, desc->ret[0], desc->ret[1], desc->ret[2]);
-
- return smc_params.x0;
+ desc->ret[0] = smc_params.args[1];
+ desc->ret[1] = smc_params.args[2];
+ desc->ret[2] = smc_params.args[3];
+ ret = smc_params.args[0];
+ pr_info("OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx",
+ smc_params.args[0], desc->ret[0], desc->ret[1], desc->ret[2]);
+ goto no_err;
+
+err_ret:
+ habmm_socket_close(handle);
+ opened = false;
+
+no_err:
+#ifdef CONFIG_GHS_VMM
+ if (ihandle)
+ free_ion_buffers(ihandle);
+#endif
+ return ret;
}
static u32 smc(u32 cmd_addr)
@@ -318,7 +568,7 @@ static void scm_inv_range(unsigned long start, unsigned long end)
outer_inv_range(start, end);
while (start < end) {
asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
- : "memory");
+ : "memory");
start += cacheline_size;
}
mb(); /* Make sure memory is visible to TZ */
@@ -351,9 +601,9 @@ static void scm_inv_range(unsigned long start, unsigned long end)
*/
static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len,
- struct scm_command *scm_buf,
- size_t scm_buf_length)
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ struct scm_command *scm_buf,
+ size_t scm_buf_length)
{
int ret;
struct scm_response *rsp;
@@ -397,15 +647,15 @@ static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
* since we want the first attempt to be the "fastpath".
*/
static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len,
- struct scm_command *cmd,
- size_t len)
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ struct scm_command *cmd,
+ size_t len)
{
int ret, retry_count = 0;
do {
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
- resp_buf, resp_len, cmd, len);
+ resp_buf, resp_len, cmd, len);
if (ret == SCM_EBUSY)
msleep(SCM_EBUSY_WAIT_MS);
if (retry_count == 33)
@@ -441,28 +691,11 @@ int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
memset(scm_buf, 0, scm_buf_len);
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
- resp_len, scm_buf, len);
+ resp_len, scm_buf, len);
return ret;
}
-struct scm_extra_arg {
- union {
- u32 args32[N_EXT_SCM_ARGS];
- u64 args64[N_EXT_SCM_ARGS];
- };
-};
-
-static enum scm_interface_version {
- SCM_UNKNOWN,
- SCM_LEGACY,
- SCM_ARMV8_32,
- SCM_ARMV8_64,
-} scm_version = SCM_UNKNOWN;
-
-/* This will be set to specify SMC32 or SMC64 */
-static u32 scm_version_mask;
-
bool is_scm_armv8(void)
{
int ret;
@@ -472,7 +705,7 @@ bool is_scm_armv8(void)
if (likely(scm_version != SCM_UNKNOWN))
return (scm_version == SCM_ARMV8_32) ||
- (scm_version == SCM_ARMV8_64);
+ (scm_version == SCM_ARMV8_64);
/*
* This is a one time check that runs on the first ever
* invocation of is_scm_armv8. We might be called in atomic
@@ -509,7 +742,7 @@ bool is_scm_armv8(void)
scm_version_mask = SMC64_MASK;
pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version,
- scm_version_mask);
+ scm_version_mask);
return (scm_version == SCM_ARMV8_32) ||
(scm_version == SCM_ARMV8_64);
@@ -551,7 +784,7 @@ static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
desc->x5 = virt_to_phys(argbuf);
__cpuc_flush_dcache_area(argbuf, argbuflen);
outer_flush_range(virt_to_phys(argbuf),
- virt_to_phys(argbuf) + argbuflen);
+ virt_to_phys(argbuf) + argbuflen);
return 0;
}
@@ -576,7 +809,7 @@ static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
* Note that cache maintenance on the argument buffer (desc->args) is taken care
* of by scm_call2; however, callers are responsible for any other cached
* buffers passed over to the secure world.
-*/
+ */
int scm_call2(u32 fn_id, struct scm_desc *desc)
{
int arglen = desc->arginfo & 0xf;
@@ -646,16 +879,16 @@ int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5);
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
ret = scm_call_qcpe(x0, desc);
if (ret < 0)
pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5, ret, desc->ret[0],
- desc->ret[1], desc->ret[2]);
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
if (arglen > N_REGISTER_ARGS)
kfree(desc->extra_arg_buf);
@@ -697,10 +930,10 @@ int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
return -ENOMEM;
ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
- resp_len, cmd, len);
+ resp_len, cmd, len);
if (unlikely(ret == SCM_EBUSY))
ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
- resp_buf, resp_len, cmd, PAGE_ALIGN(len));
+ resp_buf, resp_len, cmd, PAGE_ALIGN(len));
kfree(cmd);
return ret;
}
@@ -709,9 +942,9 @@ EXPORT_SYMBOL(scm_call);
#define SCM_CLASS_REGISTER (0x2 << 8)
#define SCM_MASK_IRQS BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
- SCM_CLASS_REGISTER | \
- SCM_MASK_IRQS | \
- (n & 0xf))
+ SCM_CLASS_REGISTER | \
+ SCM_MASK_IRQS | \
+ (n & 0xf))
/**
* scm_call_atomic1() - Send an atomic SCM command with one argument
@@ -908,7 +1141,7 @@ EXPORT_SYMBOL(scm_call_atomic4_3);
* uninterruptable, atomic and SMP safe.
*/
s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
- u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+ u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
{
int ret;
int context_id;
@@ -985,8 +1218,8 @@ EXPORT_SYMBOL(scm_get_version);
u32 scm_io_read(phys_addr_t address)
{
struct scm_desc desc = {
- .args[0] = address,
- .arginfo = SCM_ARGS(1),
+ .args[0] = address,
+ .arginfo = SCM_ARGS(1),
};
if (!is_scm_armv8())
@@ -1005,12 +1238,12 @@ int scm_io_write(phys_addr_t address, u32 val)
ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
else {
struct scm_desc desc = {
- .args[0] = address,
- .args[1] = val,
- .arginfo = SCM_ARGS(2),
+ .args[0] = address,
+ .args[1] = val,
+ .arginfo = SCM_ARGS(2),
};
ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
- &desc);
+ &desc);
}
return ret;
}
@@ -1026,7 +1259,7 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id)
u32 svc_cmd = (svc_id << 10) | cmd_id;
ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
- sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+ sizeof(svc_cmd), &ret_val, sizeof(ret_val));
if (!ret && ret_val)
return 1;
else
@@ -1124,7 +1357,7 @@ bool scm_is_secure_device(void)
desc.arginfo = 0;
if (!is_scm_armv8()) {
ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
- 0, &resp, sizeof(resp));
+ 0, &resp, sizeof(resp));
} else {
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
TZ_INFO_GET_SECURE_STATE),
diff --git a/drivers/soc/qcom/subsystem_notif_virt.c b/drivers/soc/qcom/subsystem_notif_virt.c
new file mode 100644
index 000000000000..cf794f249d3d
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_notif_virt.c
@@ -0,0 +1,163 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/subsystem_notif.h>
+
+static void __iomem *base_reg;
+
+struct state_notifier_block {
+ const char *subsystem;
+ struct notifier_block nb;
+ u32 offset;
+ void *handle;
+ struct list_head notifier_list;
+};
+
+static LIST_HEAD(notifier_block_list);
+
+static int subsys_state_callback(struct notifier_block *this,
+ unsigned long value, void *priv)
+{
+ struct state_notifier_block *notifier =
+ container_of(this, struct state_notifier_block, nb);
+
+ writel_relaxed(value, base_reg + notifier->offset);
+
+ return NOTIFY_OK;
+}
+
+static int subsys_notif_virt_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct device_node *child = NULL;
+ struct resource *res;
+ struct state_notifier_block *notif_block;
+ int ret = 0;
+
+ if (!pdev) {
+ dev_err(&pdev->dev, "pdev is NULL\n");
+ return -EINVAL;
+ }
+
+ node = pdev->dev.of_node;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vdev_base");
+ base_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR_OR_NULL(base_reg)) {
+ dev_err(&pdev->dev, "Memory mapping failed\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, child) {
+
+ notif_block = devm_kmalloc(&pdev->dev,
+ sizeof(struct state_notifier_block),
+ GFP_KERNEL);
+ if (!notif_block)
+ return -ENOMEM;
+
+ notif_block->subsystem =
+ of_get_property(child, "subsys-name", NULL);
+ if (IS_ERR_OR_NULL(notif_block->subsystem)) {
+ dev_err(&pdev->dev, "Could not find subsystem name\n");
+ ret = -EINVAL;
+ goto err_nb;
+ }
+
+ notif_block->nb.notifier_call = subsys_state_callback;
+
+ notif_block->handle =
+ subsys_notif_register_notifier(notif_block->subsystem,
+ &notif_block->nb);
+ if (IS_ERR_OR_NULL(notif_block->handle)) {
+ dev_err(&pdev->dev, "Could not register SSR notifier cb\n");
+ ret = -EINVAL;
+ goto err_nb;
+ }
+
+ ret = of_property_read_u32(child, "offset",
+ &notif_block->offset);
+ if (ret) {
+ dev_err(&pdev->dev, "offset reading for %s failed\n",
+ notif_block->subsystem);
+ ret = -EINVAL;
+ goto err_offset;
+ }
+
+ list_add_tail(&notif_block->notifier_list,
+ &notifier_block_list);
+
+ }
+ return 0;
+
+err_offset:
+ subsys_notif_unregister_notifier(notif_block->handle,
+ &notif_block->nb);
+err_nb:
+ devm_kfree(&pdev->dev, notif_block);
+ return ret;
+}
+
+static int subsys_notif_virt_remove(struct platform_device *pdev)
+{
+ struct state_notifier_block *notif_block, *tmp;
+
+ list_for_each_entry_safe(notif_block, tmp, &notifier_block_list,
+ notifier_list) {
+ subsys_notif_unregister_notifier(notif_block->handle,
+ &notif_block->nb);
+ list_del(&notif_block->notifier_list);
+ }
+ return 0;
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "qcom,subsys-notif-virt" },
+ {},
+};
+
+static struct platform_driver subsys_notif_virt_driver = {
+ .probe = subsys_notif_virt_probe,
+ .remove = subsys_notif_virt_remove,
+ .driver = {
+ .name = "subsys_notif_virt",
+ .owner = THIS_MODULE,
+ .of_match_table = match_table,
+ },
+};
+
+static int __init subsys_notif_virt_init(void)
+{
+ return platform_driver_register(&subsys_notif_virt_driver);
+}
+module_init(subsys_notif_virt_init);
+
+static void __exit subsys_notif_virt_exit(void)
+{
+ platform_driver_unregister(&subsys_notif_virt_driver);
+}
+module_exit(subsys_notif_virt_exit);
+
+MODULE_DESCRIPTION("Subsystem Notification Virtual Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index ae249f382339..ea94456ccef8 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1083,7 +1083,7 @@ int subsystem_restart_dev(struct subsys_device *dev)
{
const char *name;
- if (!get_device(&dev->dev))
+ if ((!dev) || !get_device(&dev->dev))
return -ENODEV;
if (!try_module_get(dev->owner)) {
@@ -1177,11 +1177,21 @@ EXPORT_SYMBOL(subsystem_crashed);
void subsys_set_crash_status(struct subsys_device *dev,
enum crash_status crashed)
{
+ if (!dev) {
+ pr_err("Invalid subsystem device\n");
+ return;
+ }
+
dev->crashed = crashed;
}
enum crash_status subsys_get_crash_status(struct subsys_device *dev)
{
+ if (!dev) {
+ pr_err("Invalid subsystem device\n");
+ return CRASH_STATUS_NO_CRASH;
+ }
+
return dev->crashed;
}
diff --git a/drivers/soc/qcom/tracer_pkt.c b/drivers/soc/qcom/tracer_pkt.c
index 6d1fa590c055..8875207c6334 100644
--- a/drivers/soc/qcom/tracer_pkt.c
+++ b/drivers/soc/qcom/tracer_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -100,7 +100,7 @@ int tracer_pkt_init(void *data, size_t data_len,
pkt_hdr->reserved = 0;
pkt_hdr->id_valid = 0;
pkt_hdr->qdss_tracing = qdss_tracing ? true : false;
- if (pkt_priv_len > MAX_CC_WLEN * sizeof(uint32_t))
+ if (pkt_priv_len >= MAX_CC_WLEN * sizeof(uint32_t))
pkt_hdr->ccl = MAX_CC_WLEN;
else
pkt_hdr->ccl = pkt_priv_len/sizeof(uint32_t) +
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8feac599e9ab..44be6b593b30 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1669,12 +1669,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
- spin_lock_irq(&as->lock);
if (as->use_dma) {
atmel_spi_stop_dma(as);
atmel_spi_release_dma(as);
}
+ spin_lock_irq(&as->lock);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 1ddba9ae8c0f..c872a2e54c4b 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(&spi->dev, !t->rx_dma)) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index a6d7029a85ac..581df3ebfc88 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -120,8 +120,8 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
- clk_disable_unprepare(dwsmmio->clk);
dw_spi_remove_host(&dwsmmio->dws);
+ clk_disable_unprepare(dwsmmio->clk);
return 0;
}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index ed8283e7397a..83b53cd956aa 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -457,6 +457,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
int elements = 0;
int word_len, element_count;
struct omap2_mcspi_cs *cs = spi->controller_state;
+ void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
@@ -517,8 +519,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (l & OMAP2_MCSPI_CHCONF_TURBO) {
elements--;
- if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
- & OMAP2_MCSPI_CHSTAT_RXS)) {
+ if (!mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
@@ -536,8 +538,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
return count;
}
}
- if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
- & OMAP2_MCSPI_CHSTAT_RXS)) {
+ if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 58efa98313aa..24c07fea9de2 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
/* SSP register addresses */
void __iomem *ioaddr;
- u32 ssdr_physical;
+ phys_addr_t ssdr_physical;
/* SSP masks*/
u32 dma_cr1;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 39d7c7c70112..2eea3de5a668 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -458,7 +458,7 @@ err_free_master:
static int sun4i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index e77add01b0e9..48888ab630c2 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -457,7 +457,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index aa7386325893..799bf2988b30 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -44,6 +44,7 @@
#include <linux/msm-sps.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
+#include <soc/qcom/boot_stats.h>
#include "spi_qsd.h"
#define SPI_MAX_BYTES_PER_WORD (4)
@@ -2581,6 +2582,7 @@ static int msm_spi_probe(struct platform_device *pdev)
int i = 0;
int rc = -ENXIO;
struct msm_spi_platform_data *pdata;
+ char boot_marker[40];
master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
if (!master) {
@@ -2649,6 +2651,10 @@ static int msm_spi_probe(struct platform_device *pdev)
}
}
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER MSM SPI_%d Init", pdev->id);
+ place_marker(boot_marker);
+
for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
dd->cs_gpios[i].valid = 0;
@@ -2740,6 +2746,10 @@ skip_dma_resources:
rc = sysfs_create_file(&(dd->dev->kobj), &dev_attr_spi_qup_state.attr);
spi_debugfs_init(dd);
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER MSM SPI_%d Ready", pdev->id);
+ place_marker(boot_marker);
+
return 0;
err_attrs:
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index d3130cfd6433..4e4b39c26e89 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -691,19 +691,26 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
type.type |= bit_mask_irq;
if (flow_type & IRQF_TRIGGER_RISING)
- type.polarity_high |= bit_mask_irq;
+ type.polarity_high |= bit_mask_irq;
+ else
+ type.polarity_high &= ~bit_mask_irq;
if (flow_type & IRQF_TRIGGER_FALLING)
- type.polarity_low |= bit_mask_irq;
+ type.polarity_low |= bit_mask_irq;
+ else
+ type.polarity_low &= ~bit_mask_irq;
} else {
if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
(flow_type & (IRQF_TRIGGER_LOW)))
return -EINVAL;
type.type &= ~bit_mask_irq; /* level trig */
- if (flow_type & IRQF_TRIGGER_HIGH)
- type.polarity_high |= bit_mask_irq;
- else
- type.polarity_low |= bit_mask_irq;
+ if (flow_type & IRQF_TRIGGER_HIGH) {
+ type.polarity_high |= bit_mask_irq;
+ type.polarity_low &= ~bit_mask_irq;
+ } else {
+ type.polarity_low |= bit_mask_irq;
+ type.polarity_high &= ~bit_mask_irq;
+ }
}
qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
diff --git a/drivers/spmi/virtspmi-pmic-arb.c b/drivers/spmi/virtspmi-pmic-arb.c
index 59fc76149ba0..84e39a73724b 100644
--- a/drivers/spmi/virtspmi-pmic-arb.c
+++ b/drivers/spmi/virtspmi-pmic-arb.c
@@ -15,6 +15,9 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -31,6 +34,10 @@
#define VPMIC_ARB_DATA0 0x08
#define VPMIC_ARB_DATA1 0x10
+/* Mapping Table */
+#define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */
+#define PMIC_ARB_CHAN_VALID BIT(15)
+
/* Channel Status fields */
enum pmic_arb_chnl_status {
PMIC_ARB_STATUS_DONE = BIT(0),
@@ -39,9 +46,6 @@ enum pmic_arb_chnl_status {
PMIC_ARB_STATUS_DROPPED = BIT(3),
};
-/* Command register fields */
-#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8
-
/* Command Opcodes */
enum pmic_arb_cmd_op_code {
PMIC_ARB_OP_EXT_WRITEL = 0,
@@ -74,21 +78,68 @@ enum pmic_arb_channel {
#define PMIC_ARB_TIMEOUT_US 100
#define PMIC_ARB_MAX_TRANS_BYTES (8)
+#define PMIC_ARB_APID_MASK 0xFF
+#define PMIC_ARB_PPID_MASK 0xFFF
+
+/* interrupt enable bit */
+#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
+
+#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+ ((((slave_id) & 0xF) << 28) | \
+ (((periph_id) & 0xFF) << 20) | \
+ (((irq_id) & 0x7) << 16) | \
+ (((apid) & 0x1FF) << 0))
+
+#define HWIRQ_SID(hwirq) (((hwirq) >> 28) & 0xF)
+#define HWIRQ_PER(hwirq) (((hwirq) >> 20) & 0xFF)
+#define HWIRQ_IRQ(hwirq) (((hwirq) >> 16) & 0x7)
+#define HWIRQ_APID(hwirq) (((hwirq) >> 0) & 0x1FF)
+
struct vspmi_backend_driver_ver_ops;
+struct apid_data {
+ u16 ppid;
+ u8 write_owner;
+ u8 irq_owner;
+};
+
/**
* vspmi_pmic_arb - Virtual SPMI PMIC Arbiter object
*
+ * @wr_base: on v1 "core", on v2 "chnls" register base off DT.
+ * @intr: address of the SPMI interrupt control registers.
+ * @acc_status: address of SPMI ACC interrupt status registers.
* @lock: lock to synchronize accesses.
+ * @irq: PMIC ARB interrupt.
+ * @min_apid: minimum APID (used for bounding IRQ search)
+ * @max_apid: maximum APID
+ * @max_periph: maximum number of PMIC peripherals supported by HW.
+ * @mapping_table: in-memory copy of PPID -> APID mapping table.
+ * @domain: irq domain object for PMIC IRQ domain
* @spmic: SPMI controller object
* @ver_ops: backend version dependent operations.
+ * @ppid_to_apid: in-memory copy of PPID -> channel (APID) mapping table.
*/
struct vspmi_pmic_arb {
+ void __iomem *wr_base;
void __iomem *core;
+ void __iomem *intr;
+ void __iomem *acc_status;
resource_size_t core_size;
raw_spinlock_t lock;
+ u8 channel;
+ int irq;
+ u16 min_apid;
+ u16 max_apid;
+ u16 max_periph;
+ u32 *mapping_table;
+ DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
+ struct irq_domain *domain;
struct spmi_controller *spmic;
const struct vspmi_backend_driver_ver_ops *ver_ops;
+ u16 *ppid_to_apid;
+ u16 last_apid;
+ struct apid_data apid_data[PMIC_ARB_MAX_PERIPHS];
};
static struct vspmi_pmic_arb *the_pa;
@@ -96,11 +147,22 @@ static struct vspmi_pmic_arb *the_pa;
* pmic_arb_ver: version dependent functionality.
*
* @ver_str: version string.
+ * @ppid_to_apid: finds the apid for a given ppid.
* @fmt_cmd: formats a GENI/SPMI command.
+ * @acc_enable: offset of SPMI_PIC_ACC_ENABLEn.
+ * @irq_status: offset of SPMI_PIC_IRQ_STATUSn.
+ * @irq_clear: offset of SPMI_PIC_IRQ_CLEARn.
+ * @channel_map_offset: offset of PMIC_ARB_REG_CHNLn
*/
struct vspmi_backend_driver_ver_ops {
const char *ver_str;
+ int (*ppid_to_apid)(struct vspmi_pmic_arb *pa, u8 sid, u16 addr,
+ u16 *apid);
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
+ u32 (*acc_enable)(u16 n);
+ u32 (*irq_status)(u16 n);
+ u32 (*irq_clear)(u16 n);
+ u32 (*channel_map_offset)(u16 n);
};
/**
@@ -112,7 +174,7 @@ struct vspmi_backend_driver_ver_ops {
static void
vspmi_pa_read_data(struct vspmi_pmic_arb *pa, u8 *buf, u32 reg, u8 bc)
{
- u32 data = __raw_readl(pa->core + reg);
+ u32 data = __raw_readl(pa->wr_base + reg);
memcpy(buf, &data, (bc & 3) + 1);
}
@@ -129,7 +191,7 @@ vspmi_pa_write_data(struct vspmi_pmic_arb *pa, const u8 *buf, u32 reg, u8 bc)
u32 data = 0;
memcpy(&data, buf, (bc & 3) + 1);
- writel_relaxed(data, pa->core + reg);
+ writel_relaxed(data, pa->wr_base + reg);
}
static int vspmi_pmic_arb_wait_for_done(struct spmi_controller *ctrl,
@@ -183,7 +245,7 @@ static int vspmi_pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
{
struct vspmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
- u8 bc = len;
+ u8 bc = len - 1;
u32 cmd;
int rc;
@@ -207,8 +269,8 @@ static int vspmi_pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
raw_spin_lock_irqsave(&pa->lock, flags);
- writel_relaxed(cmd, pa->core + VPMIC_ARB_CMD);
- rc = vspmi_pmic_arb_wait_for_done(ctrl, pa->core, sid, addr,
+ writel_relaxed(cmd, pa->wr_base + VPMIC_ARB_CMD);
+ rc = vspmi_pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr,
PMIC_ARB_CHANNEL_OBS);
if (rc)
goto done;
@@ -228,7 +290,7 @@ static int vspmi_pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc,
{
struct vspmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
- u8 bc = len;
+ u8 bc = len - 1;
u32 cmd;
int rc;
@@ -260,22 +322,415 @@ static int vspmi_pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc,
vspmi_pa_write_data(pa, buf + 4, VPMIC_ARB_DATA1, bc - 4);
/* Start the transaction */
- writel_relaxed(cmd, pa->core + VPMIC_ARB_CMD);
- rc = vspmi_pmic_arb_wait_for_done(ctrl, pa->core, sid, addr,
+ writel_relaxed(cmd, pa->wr_base + VPMIC_ARB_CMD);
+ rc = vspmi_pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr,
PMIC_ARB_CHANNEL_RW);
raw_spin_unlock_irqrestore(&pa->lock, flags);
return rc;
}
+enum qpnpint_regs {
+ QPNPINT_REG_RT_STS = 0x10,
+ QPNPINT_REG_SET_TYPE = 0x11,
+ QPNPINT_REG_POLARITY_HIGH = 0x12,
+ QPNPINT_REG_POLARITY_LOW = 0x13,
+ QPNPINT_REG_LATCHED_CLR = 0x14,
+ QPNPINT_REG_EN_SET = 0x15,
+ QPNPINT_REG_EN_CLR = 0x16,
+ QPNPINT_REG_LATCHED_STS = 0x18,
+};
+
+struct spmi_pmic_arb_qpnpint_type {
+ u8 type; /* 1 -> edge */
+ u8 polarity_high;
+ u8 polarity_low;
+} __packed;
+
+/* Simplified accessor functions for irqchip callbacks */
+static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
+ size_t len)
+{
+ struct vspmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
+
+ if (vspmi_pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ (per << 8) + reg, buf, len))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed irqchip transaction on %x\n",
+ d->irq);
+}
+
+static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
+{
+ struct vspmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
+
+ if (vspmi_pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
+ (per << 8) + reg, buf, len))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed irqchip transaction on %x\n",
+ d->irq);
+}
+
+static void cleanup_irq(struct vspmi_pmic_arb *pa, u16 apid, int id)
+{
+ u16 ppid = pa->apid_data[apid].ppid;
+ u8 sid = ppid >> 8;
+ u8 per = ppid & 0xFF;
+ u8 irq_mask = BIT(id);
+
+ dev_err_ratelimited(&pa->spmic->dev,
+ "cleanup_irq apid=%d sid=0x%x per=0x%x irq=%d\n",
+ apid, sid, per, id);
+ writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));
+}
+
+static void periph_interrupt(struct vspmi_pmic_arb *pa, u16 apid, bool show)
+{
+ unsigned int irq;
+ u32 status;
+ int id;
+ u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
+ u8 per = pa->apid_data[apid].ppid & 0xFF;
+
+ status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
+ while (status) {
+ id = ffs(status) - 1;
+ status &= ~BIT(id);
+ irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
+ if (irq == 0) {
+ cleanup_irq(pa, apid, id);
+ continue;
+ }
+ if (show) {
+ struct irq_desc *desc;
+ const char *name = "null";
+
+ desc = irq_to_desc(irq);
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ pr_warn("spmi_show_resume_irq: %d triggered [0x%01x, 0x%02x, 0x%01x] %s\n",
+ irq, sid, per, id, name);
+ } else {
+ generic_handle_irq(irq);
+ }
+ }
+}
+
+static void __pmic_arb_chained_irq(struct vspmi_pmic_arb *pa, bool show)
+{
+ u32 enable;
+ int i;
+ /* status based dispatch */
+ bool acc_valid = false;
+ u32 irq_status = 0;
+
+ /* ACC_STATUS is empty but IRQ fired check IRQ_STATUS */
+ if (!acc_valid) {
+ for (i = pa->min_apid; i <= pa->max_apid; i++) {
+ irq_status = readl_relaxed(pa->intr +
+ pa->ver_ops->irq_status(i));
+ if (irq_status) {
+ enable = readl_relaxed(pa->intr +
+ pa->ver_ops->acc_enable(i));
+ if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
+ dev_dbg(&pa->spmic->dev,
+ "Dispatching IRQ for apid=%d status=%x\n",
+ i, irq_status);
+ periph_interrupt(pa, i, show);
+ }
+ }
+ }
+ }
+}
+
+static void pmic_arb_chained_irq(struct irq_desc *desc)
+{
+ struct vspmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+ __pmic_arb_chained_irq(pa, false);
+ chained_irq_exit(chip, desc);
+}
+
+static void qpnpint_irq_ack(struct irq_data *d)
+{
+ struct vspmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
+ u8 data;
+
+ writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
+
+ data = BIT(irq);
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
+}
+
+static void qpnpint_irq_mask(struct irq_data *d)
+{
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 data = BIT(irq);
+
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
+}
+
+static void qpnpint_irq_unmask(struct irq_data *d)
+{
+ struct vspmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
+ u8 buf[2];
+
+ writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
+ pa->intr + pa->ver_ops->acc_enable(apid));
+
+ qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
+ if (!(buf[0] & BIT(irq))) {
+ /*
+ * Since the interrupt is currently disabled, write to both the
+ * LATCHED_CLR and EN_SET registers so that a spurious interrupt
+ * cannot be triggered when the interrupt is enabled
+ */
+ buf[0] = BIT(irq);
+ buf[1] = BIT(irq);
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
+ }
+}
+
+static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct spmi_pmic_arb_qpnpint_type type;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 bit_mask_irq = BIT(irq);
+
+ qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ type.type |= bit_mask_irq;
+ if (flow_type & IRQF_TRIGGER_RISING)
+ type.polarity_high |= bit_mask_irq;
+ if (flow_type & IRQF_TRIGGER_FALLING)
+ type.polarity_low |= bit_mask_irq;
+ } else {
+ if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
+ (flow_type & (IRQF_TRIGGER_LOW)))
+ return -EINVAL;
+
+ type.type &= ~bit_mask_irq; /* level trig */
+ if (flow_type & IRQF_TRIGGER_HIGH)
+ type.polarity_high |= bit_mask_irq;
+ else
+ type.polarity_low |= bit_mask_irq;
+ }
+
+ qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
+ return 0;
+}
+
+static int qpnpint_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 status = 0;
+
+ if (which != IRQCHIP_STATE_LINE_LEVEL)
+ return -EINVAL;
+
+ qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1);
+ *state = !!(status & BIT(irq));
+
+ return 0;
+}
+
+static struct irq_chip pmic_arb_irqchip = {
+ .name = "pmic_arb",
+ .irq_ack = qpnpint_irq_ack,
+ .irq_mask = qpnpint_irq_mask,
+ .irq_unmask = qpnpint_irq_unmask,
+ .irq_set_type = qpnpint_irq_set_type,
+ .irq_get_irqchip_state = qpnpint_get_irqchip_state,
+ .flags = IRQCHIP_MASK_ON_SUSPEND
+ | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static void qpnpint_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 buf;
+
+ buf = BIT(irq);
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &buf, 1);
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 1);
+}
+
+static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct vspmi_pmic_arb *pa = d->host_data;
+ int rc;
+ u16 apid;
+
+ dev_dbg(&pa->spmic->dev,
+ "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
+ intspec[0], intspec[1], intspec[2]);
+
+ if (irq_domain_get_of_node(d) != controller)
+ return -EINVAL;
+ if (intsize != 4)
+ return -EINVAL;
+ if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
+ return -EINVAL;
+
+ rc = pa->ver_ops->ppid_to_apid(pa, intspec[0],
+ (intspec[1] << 8), &apid);
+ if (rc < 0) {
+ dev_err(&pa->spmic->dev,
+ "failed to xlate sid = 0x%x, periph = 0x%x, irq = %u rc = %d\n",
+ intspec[0], intspec[1], intspec[2], rc);
+ return rc;
+ }
+
+ /* Keep track of {max,min}_apid for bounding search during interrupt */
+ if (apid > pa->max_apid)
+ pa->max_apid = apid;
+ if (apid < pa->min_apid)
+ pa->min_apid = apid;
+
+ *out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
+ *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
+
+ dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
+
+ return 0;
+}
+
+static int qpnpint_irq_domain_map(struct irq_domain *d,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct vspmi_pmic_arb *pa = d->host_data;
+
+ dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
+
+ irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_noprobe(virq);
+ return 0;
+}
+
+static u16 pmic_arb_find_apid(struct vspmi_pmic_arb *pa, u16 ppid)
+{
+ u32 regval, offset;
+ u16 apid;
+ u16 id;
+
+ /*
+ * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
+ * ppid_to_apid is an in-memory invert of that table.
+ */
+ for (apid = pa->last_apid; apid < pa->max_periph; apid++) {
+ offset = pa->ver_ops->channel_map_offset(apid);
+ if (offset >= pa->core_size)
+ break;
+
+ regval = readl_relaxed(pa->core + offset);
+ if (!regval) {
+ /* If this regval is 0, it means that this apid is
+ * unused. Write the current ppid to this reg to
+ * use this apid to map to the given ppid.
+ */
+ writel_relaxed(ppid, pa->core + offset);
+ regval = ppid;
+ }
+
+ id = regval & PMIC_ARB_PPID_MASK;
+ pa->ppid_to_apid[id] = apid | PMIC_ARB_CHAN_VALID;
+ pa->apid_data[apid].ppid = id;
+ if (id == ppid) {
+ apid |= PMIC_ARB_CHAN_VALID;
+ break;
+ }
+ }
+ pa->last_apid = apid & ~PMIC_ARB_CHAN_VALID;
+
+ return apid;
+}
+
+static int
+pmic_arb_ppid_to_apid_v2(struct vspmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+{
+ u16 ppid = (sid << 8) | (addr >> 8);
+ u16 apid_valid;
+
+ apid_valid = pa->ppid_to_apid[ppid];
+ if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+ apid_valid = pmic_arb_find_apid(pa, ppid);
+ if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+ return -ENODEV;
+
+ *apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+ return 0;
+}
+
static u32 vspmi_pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
{
- return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
+ return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) |
+ ((bc & 0x7) + 1);
+}
+
+static u32 pmic_arb_acc_enable_v2(u16 n)
+{
+ return 0x1000 * n;
+}
+
+static u32 pmic_arb_irq_status_v2(u16 n)
+{
+ return 0x4 + 0x1000 * n;
+}
+
+static u32 pmic_arb_irq_clear_v2(u16 n)
+{
+ return 0x8 + 0x1000 * n;
+}
+
+static u32 pmic_arb_channel_map_offset_v2(u16 n)
+{
+ return 0x800 + 0x4 * n;
}
static const struct vspmi_backend_driver_ver_ops pmic_arb_v1 = {
.ver_str = "v1",
+ .ppid_to_apid = pmic_arb_ppid_to_apid_v2,
.fmt_cmd = vspmi_pmic_arb_fmt_cmd_v1,
+ .acc_enable = pmic_arb_acc_enable_v2,
+ .irq_status = pmic_arb_irq_status_v2,
+ .irq_clear = pmic_arb_irq_clear_v2,
+ .channel_map_offset = pmic_arb_channel_map_offset_v2,
+};
+
+static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
+ .map = qpnpint_irq_domain_map,
+ .xlate = qpnpint_irq_domain_dt_translate,
+ .activate = qpnpint_irq_domain_activate,
};
static int vspmi_pmic_arb_probe(struct platform_device *pdev)
@@ -284,6 +739,7 @@ static int vspmi_pmic_arb_probe(struct platform_device *pdev)
struct spmi_controller *ctrl;
struct resource *res;
u32 backend_ver;
+ u32 channel;
int err;
ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
@@ -312,22 +768,101 @@ static int vspmi_pmic_arb_probe(struct platform_device *pdev)
if (backend_ver == VPMIC_ARB_VERSION)
pa->ver_ops = &pmic_arb_v1;
+ /* the apid to ppid table starts at PMIC_ARB_REG_CHNL0 */
+ pa->max_periph
+ = (pa->core_size - pa->ver_ops->channel_map_offset(0)) / 4;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "chnls");
+ pa->wr_base = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->wr_base)) {
+ err = PTR_ERR(pa->wr_base);
+ goto err_put_ctrl;
+ }
+
+ pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
+ PMIC_ARB_MAX_PPID,
+ sizeof(*pa->ppid_to_apid),
+ GFP_KERNEL);
+ if (!pa->ppid_to_apid) {
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
+
dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
pa->ver_ops->ver_str, backend_ver);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
+ pa->intr = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->intr)) {
+ err = PTR_ERR(pa->intr);
+ goto err_put_ctrl;
+ }
+ pa->acc_status = pa->intr;
+
+ pa->irq = platform_get_irq_byname(pdev, "periph_irq");
+ if (pa->irq < 0) {
+ err = pa->irq;
+ goto err_put_ctrl;
+ }
+
+ err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
+ if (err) {
+ dev_err(&pdev->dev, "channel unspecified.\n");
+ goto err_put_ctrl;
+ }
+
+ if (channel > 5) {
+ dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
+ channel);
+ err = -EINVAL;
+ goto err_put_ctrl;
+ }
+
+ pa->channel = channel;
+
+ pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
+ sizeof(*pa->mapping_table), GFP_KERNEL);
+ if (!pa->mapping_table) {
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
+ /* Initialize max_apid/min_apid to the opposite bounds; during
+ * the irq domain translation, we are sure to update these.
+ */
+ pa->max_apid = 0;
+ pa->min_apid = PMIC_ARB_MAX_PERIPHS - 1;
+
platform_set_drvdata(pdev, ctrl);
raw_spin_lock_init(&pa->lock);
ctrl->read_cmd = vspmi_pmic_arb_read_cmd;
ctrl->write_cmd = vspmi_pmic_arb_write_cmd;
+ dev_dbg(&pdev->dev, "adding irq domain\n");
+ pa->domain = irq_domain_add_tree(pdev->dev.of_node,
+ &pmic_arb_irq_domain_ops, pa);
+ if (!pa->domain) {
+ dev_err(&pdev->dev, "unable to create irq_domain\n");
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
+ irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
+ enable_irq_wake(pa->irq);
+
err = spmi_controller_add(ctrl);
if (err)
- goto err_put_ctrl;
+ goto err_domain_remove;
the_pa = pa;
return 0;
+err_domain_remove:
+ irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
+ irq_domain_remove(pa->domain);
err_put_ctrl:
spmi_controller_put(ctrl);
return err;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5d1e9a0fc389..e2ff6b5b2094 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -613,9 +613,10 @@ out:
return err;
}
-static int ssb_bus_register(struct ssb_bus *bus,
- ssb_invariants_func_t get_invariants,
- unsigned long baseaddr)
+static int __maybe_unused
+ssb_bus_register(struct ssb_bus *bus,
+ ssb_invariants_func_t get_invariants,
+ unsigned long baseaddr)
{
int err;
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 26629b856f91..6c4445863705 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -90,6 +90,15 @@ config ONESHOT_SYNC_USER
help
Provide a userspace API for creating oneshot sync objects.
+config ANDROID_VSOC
+ tristate "Android Virtual SoC support"
+ default n
+ depends on PCI_MSI
+ ---help---
+ This option adds support for the Virtual SoC driver needed to boot
+ a 'cuttlefish' Android image inside QEMU. The driver interacts with
+ a QEMU ivshmem device. If built as a module, it will be called vsoc.
+
source "drivers/staging/android/ion/Kconfig"
source "drivers/staging/android/fiq_debugger/Kconfig"
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index b0b47ae4c0ea..8ef816152020 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_SYNC) += sync.o sync_debug.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o
obj-$(CONFIG_ONESHOT_SYNC) += oneshot_sync.o
+obj-$(CONFIG_ANDROID_VSOC) += vsoc.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 8f3ac37bfe12..2188bc395a48 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -25,5 +25,14 @@ ion/
exposes existing cma regions and doesn't reserve unecessarily memory when
booting a system which doesn't use ion.
+vsoc.c, uapi/vsoc_shm.h
+ - The current driver uses the same wait queue for all of the futexes in a
+ region. This will cause false wakeups in regions with a large number of
+ waiting threads. We should eventually use multiple queues and select the
+ queue based on the region.
+ - Add debugfs support for examining the permissions of regions.
+ - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
+ superseded by the futex and is there for legacy reasons.
+
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 28c9afe538ca..cba6b4e17fee 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -330,24 +330,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&ashmem_mutex);
if (asma->size == 0) {
- ret = -EINVAL;
- goto out;
+ mutex_unlock(&ashmem_mutex);
+ return -EINVAL;
}
if (!asma->file) {
- ret = -EBADF;
- goto out;
+ mutex_unlock(&ashmem_mutex);
+ return -EBADF;
}
+ mutex_unlock(&ashmem_mutex);
+
ret = vfs_llseek(asma->file, offset, origin);
if (ret < 0)
- goto out;
+ return ret;
/** Copy f_pos from backing file, since f_ops->llseek() sets it */
file->f_pos = asma->file->f_pos;
-
-out:
- mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -698,30 +697,30 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
size_t pgstart, pgend;
int ret = -EINVAL;
- if (unlikely(!asma->file))
- return -EINVAL;
-
if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
return -EFAULT;
+ mutex_lock(&ashmem_mutex);
+
+ if (unlikely(!asma->file))
+ goto out_unlock;
+
/* per custom, you can pass zero for len to mean "everything onward" */
if (!pin.len)
pin.len = PAGE_ALIGN(asma->size) - pin.offset;
if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
- return -EINVAL;
+ goto out_unlock;
if (unlikely(((__u32)-1) - pin.offset < pin.len))
- return -EINVAL;
+ goto out_unlock;
if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
- return -EINVAL;
+ goto out_unlock;
pgstart = pin.offset / PAGE_SIZE;
pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
- mutex_lock(&ashmem_mutex);
-
switch (cmd) {
case ASHMEM_PIN:
ret = ashmem_pin(asma, pgstart, pgend);
@@ -734,6 +733,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
break;
}
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index f5a81fc48ffb..5ed1ed37fad8 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1031,7 +1031,6 @@ void ion_client_destroy(struct ion_client *client)
struct ion_device *dev = client->dev;
struct rb_node *n;
- pr_debug("%s: %d\n", __func__, __LINE__);
mutex_lock(&debugfs_mutex);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
@@ -1239,9 +1238,6 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
int i;
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
-
if (!ion_buffer_fault_user_mappings(buffer))
return;
@@ -1295,7 +1291,6 @@ static void ion_vm_open(struct vm_area_struct *vma)
mutex_lock(&buffer->lock);
list_add(&vma_list->list, &buffer->vmas);
mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %pK\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
@@ -1303,14 +1298,12 @@ static void ion_vm_close(struct vm_area_struct *vma)
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list, *tmp;
- pr_debug("%s\n", __func__);
mutex_lock(&buffer->lock);
list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
if (vma_list->vma != vma)
continue;
list_del(&vma_list->list);
kfree(vma_list);
- pr_debug("%s: deleting %pK\n", __func__, vma);
break;
}
mutex_unlock(&buffer->lock);
@@ -1717,7 +1710,6 @@ static int ion_release(struct inode *inode, struct file *file)
{
struct ion_client *client = file->private_data;
- pr_debug("%s: %d\n", __func__, __LINE__);
ion_client_destroy(client);
return 0;
}
@@ -1729,7 +1721,6 @@ static int ion_open(struct inode *inode, struct file *file)
struct ion_client *client;
char debug_name[64];
- pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
client = ion_client_create(dev, debug_name);
if (IS_ERR(client))
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index d932db4f9810..77bc25dfd562 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -65,8 +65,6 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
struct device *dev = heap->priv;
struct ion_cma_buffer_info *info;
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info)
return ION_CMA_ALLOCATE_FAILED;
@@ -94,7 +92,6 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
/* keep this for memory release */
buffer->priv_virt = info;
- dev_dbg(dev, "Allocate buffer %pK\n", buffer);
return 0;
err:
@@ -107,7 +104,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- dev_dbg(dev, "Release buffer %pK\n", buffer);
/* release memory */
dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
sg_free_table(info->table);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index c19b87d10df0..0034dfe17ac8 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -64,6 +64,9 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
list_add_tail(&page->lru, &pool->low_items);
pool->low_count++;
}
+
+ mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+ (1 << (PAGE_SHIFT + pool->order)));
mutex_unlock(&pool->mutex);
return 0;
}
@@ -83,6 +86,8 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
}
list_del(&page->lru);
+ mod_zone_page_state(page_zone(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+ -(1 << (PAGE_SHIFT + pool->order)));
return page;
}
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 989d336c0648..7326aa46a8f6 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -745,7 +745,7 @@ long msm_ion_custom_ioctl(struct ion_client *client,
data.flush_data.offset;
end = start + data.flush_data.length;
- if (check_vaddr_bounds(start, end)) {
+ if (start && check_vaddr_bounds(start, end)) {
pr_err("%s: virtual address %pK is out of bounds\n",
__func__, data.flush_data.vaddr);
ret = -EINVAL;
diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h
new file mode 100644
index 000000000000..741b1387c25b
--- /dev/null
+++ b/drivers/staging/android/uapi/vsoc_shm.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_VSOC_SHM_H
+#define _UAPI_LINUX_VSOC_SHM_H
+
+#include <linux/types.h>
+
+/**
+ * A permission is a token that permits a receiver to read and/or write an area
+ * of memory within a Vsoc region.
+ *
+ * An fd_scoped permission grants both read and write access, and can be
+ * attached to a file description (see open(2)).
+ * Ownership of the area can then be shared by passing a file descriptor
+ * among processes.
+ *
+ * begin_offset and end_offset define the area of memory that is controlled by
+ * the permission. owner_offset points to a word, also in shared memory, that
+ * controls ownership of the area.
+ *
+ * Ownership of the region expires when the associated file description is
+ * released.
+ *
+ * At most one permission can be attached to each file description.
+ *
+ * This is useful when implementing HALs like gralloc that scope and pass
+ * ownership of shared resources via file descriptors.
+ *
+ * The caller is responsible for doing any fencing.
+ *
+ * The calling process will normally identify a currently free area of
+ * memory. It will construct a proposed fd_scoped_permission_arg structure:
+ *
+ * begin_offset and end_offset describe the area being claimed
+ *
+ * owner_offset points to the location in shared memory that indicates the
+ * owner of the area.
+ *
+ * owned_value is the value that will be stored at owner_offset iff the
+ * permission can be granted. It must be different from VSOC_REGION_FREE.
+ *
+ * Two fd_scoped_permission structures are compatible if they vary only by
+ * their owned_value fields.
+ *
+ * The driver ensures that, for any group of simultaneous callers proposing
+ * compatible fd_scoped_permissions, it will accept exactly one of the
+ * proposals. The other callers will get a failure with errno of EBUSY.
+ *
+ * A process receiving a file descriptor can identify the region being
+ * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl.
+ */
+struct fd_scoped_permission {
+ __u32 begin_offset;
+ __u32 end_offset;
+ __u32 owner_offset;
+ __u32 owned_value;
+};
+
+/*
+ * This value represents a free area of memory. The driver expects to see this
+ * value at owner_offset when creating a permission, and refuses to create the
+ * permission otherwise. It writes this value back once the permission is no
+ * longer needed.
+ */
+#define VSOC_REGION_FREE ((__u32)0)
+
+/**
+ * ioctl argument for VSOC_CREATE_FD_SCOPED_PERMISSION
+ */
+struct fd_scoped_permission_arg {
+ struct fd_scoped_permission perm;
+ __s32 managed_region_fd;
+};
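+
+/*
+ * Illustrative sketch (not part of the ABI): a userspace caller might claim
+ * a sub-area roughly as follows. The fds (manager_fd, managed_fd) and the
+ * offsets are assumptions made up for this example; only the ioctl and the
+ * structures above are defined by this header.
+ *
+ *	struct fd_scoped_permission_arg parg = {
+ *		.perm = {
+ *			.begin_offset = 0x1000,	(page aligned, in data section)
+ *			.end_offset   = 0x2000,
+ *			.owner_offset = 0x0040,	(word in the managing region)
+ *			.owned_value  = 42,	(anything but VSOC_REGION_FREE)
+ *		},
+ *		.managed_region_fd = managed_fd,
+ *	};
+ *	int ret = ioctl(manager_fd, VSOC_CREATE_FD_SCOPED_PERMISSION, &parg);
+ *
+ * On a lost race for a compatible area the call fails as described above;
+ * the permission is released automatically when managed_fd's file
+ * description is closed.
+ */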
+
+#define VSOC_NODE_FREE ((__u32)0)
+
+/*
+ * Describes a signal table in shared memory. Each non-zero entry in the
+ * table indicates that the receiver should signal the futex at the given
+ * offset. Offsets are relative to the region, not the shared memory window.
+ *
+ * interrupt_signalled_offset is used to reliably signal interrupts across the
+ * vmm boundary. There are two roles: transmitter and receiver. For example,
+ * in the host_to_guest_signal_table the host is the transmitter and the
+ * guest is the receiver. The protocol is as follows:
+ *
+ * 1. The transmitter should convert the offset of the futex to an offset
+ * in the signal table [0, (1 << num_nodes_lg2))
+ * The transmitter can choose any appropriate hashing algorithm, including
+ * hash = futex_offset & ((1 << num_nodes_lg2) - 1)
+ *
+ * 2. The transmitter should atomically compare and swap futex_offset with 0
+ *    at hash. There are 3 possible outcomes:
+ * a. The swap fails because the futex_offset is already in the table.
+ * The transmitter should stop.
+ * b. Some other offset is in the table. This is a hash collision. The
+ * transmitter should move to another table slot and try again. One
+ * possible algorithm:
+ * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1)
+ * c. The swap worked. Continue below.
+ *
+ * 3. The transmitter atomically swaps 1 with the value at the
+ * interrupt_signalled_offset. There are two outcomes:
+ * a. The prior value was 1. In this case an interrupt has already been
+ * posted. The transmitter is done.
+ * b. The prior value was 0, indicating that the receiver may be sleeping.
+ * The transmitter will issue an interrupt.
+ *
+ * 4. On waking, the receiver immediately exchanges a 0 with the
+ *    interrupt_signalled_offset. If it receives a 0 then this is a spurious
+ * interrupt. That may occasionally happen in the current protocol, but
+ * should be rare.
+ *
+ * 5. The receiver scans the signal table by atomically exchanging 0 at each
+ * location. If a non-zero offset is returned from the exchange the
+ * receiver wakes all sleepers at the given offset:
+ * futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT);
+ *
+ * 6. The receiver thread then does a conditional wait, waking immediately
+ * if the value at interrupt_signalled_offset is non-zero. This catches cases
+ *    where additional signals were posted while the table was being scanned.
+ * On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT
+ * ioctl.
+ */
+struct vsoc_signal_table_layout {
+ /* log_2(Number of signal table entries) */
+ __u32 num_nodes_lg2;
+ /*
+ * Offset to the first signal table entry relative to the start of the
+ * region
+ */
+ __u32 futex_uaddr_table_offset;
+ /*
+ * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates
+ * that one or more offsets are currently posted in the table.
+ */
+ __u32 interrupt_signalled_offset;
+};
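+
+/*
+ * Illustrative sketch (not part of the ABI) of the transmitter side of the
+ * protocol described above, assuming C11 atomics in userspace. Here "tbl" is
+ * the vsoc_signal_table_layout for the direction in use, "slots" is an
+ * atomic_uint view of the entries at futex_uaddr_table_offset, "signalled"
+ * points at interrupt_signalled_offset and ring_doorbell() stands in for
+ * raising the interrupt; all of these names are assumptions.
+ *
+ *	uint32_t mask = (1u << tbl.num_nodes_lg2) - 1;
+ *	uint32_t slot = futex_offset & mask;
+ *	uint32_t expected = VSOC_NODE_FREE;
+ *
+ *	(steps 1-2: post the offset, probing linearly on hash collisions)
+ *	while (!atomic_compare_exchange_strong(&slots[slot], &expected,
+ *					       futex_offset)) {
+ *		if (expected == futex_offset)
+ *			return;			(already posted, nothing to do)
+ *		expected = VSOC_NODE_FREE;
+ *		slot = (slot + 1) & mask;
+ *	}
+ *	(step 3: raise the interrupt only if one is not already pending)
+ *	if (atomic_exchange(signalled, 1) == 0)
+ *		ring_doorbell();
+ */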
+
+#define VSOC_REGION_WHOLE ((__s32)0)
+#define VSOC_DEVICE_NAME_SZ 16
+
+/**
+ * Each HAL would (usually) talk to a single device region.
+ * Multiple entities care about these regions:
+ * - The ivshmem_server will populate the regions in shared memory
+ * - The guest kernel will read the region, create minor device nodes, and
+ * allow interested parties to register for FUTEX_WAKE events in the region
+ * - HALs will access via the minor device nodes published by the guest kernel
+ * - Host side processes will access the region via the ivshmem_server:
+ * 1. Pass name to ivshmem_server at a UNIX socket
+ *    2. ivshmem_server will reply with:
+ * - host->guest doorbell fd
+ * - guest->host doorbell fd
+ * - fd for the shared memory region
+ * - region offset
+ * 3. Start a futex receiver thread on the doorbell fd pointed at the
+ * signal_nodes
+ */
+struct vsoc_device_region {
+ __u16 current_version;
+ __u16 min_compatible_version;
+ __u32 region_begin_offset;
+ __u32 region_end_offset;
+ __u32 offset_of_region_data;
+ struct vsoc_signal_table_layout guest_to_host_signal_table;
+ struct vsoc_signal_table_layout host_to_guest_signal_table;
+ /* Name of the device. Must always be terminated with a '\0', so
+ * the longest supported device name is 15 characters.
+ */
+ char device_name[VSOC_DEVICE_NAME_SZ];
+ /* There are two ways that permissions to access regions are handled:
+	 *  - When managed_by is VSOC_REGION_WHOLE, any process that can
+	 *    open the device node for the region gains complete access to it.
+	 *  - When managed_by names another region, processes that open this
+	 *    region cannot access it directly. Access to a sub-region must be
+	 *    established by invoking the VSOC_CREATE_FD_SCOPED_PERMISSION
+	 *    ioctl on the region referenced in managed_by, providing a file
+	 *    instance (represented by an fd) opened on this region.
+ */
+ __u32 managed_by;
+};
+
+/*
+ * The vsoc layout descriptor.
+ * The first 4K should be reserved for the shm header and region descriptors.
+ * The regions should be page aligned.
+ */
+
+struct vsoc_shm_layout_descriptor {
+ __u16 major_version;
+ __u16 minor_version;
+
+ /* size of the shm. This may be redundant but nice to have */
+ __u32 size;
+
+ /* number of shared memory regions */
+ __u32 region_count;
+
+ /* The offset to the start of region descriptors */
+ __u32 vsoc_region_desc_offset;
+};
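+
+/*
+ * Illustrative sketch (not part of the ABI): given a mapping "base" of the
+ * shared memory window, the region descriptor table is located as shown
+ * below; this mirrors what the guest driver does at probe time.
+ *
+ *	struct vsoc_shm_layout_descriptor *layout = base;
+ *	struct vsoc_device_region *regions =
+ *		(void *)base + layout->vsoc_region_desc_offset;
+ *	struct vsoc_device_region *r = &regions[i];	(0 <= i < region_count)
+ */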
+
+/*
+ * This specifies the current version that should be stored in
+ * vsoc_shm_layout_descriptor.major_version and
+ * vsoc_shm_layout_descriptor.minor_version.
+ * It should be updated only if the vsoc_device_region and
+ * vsoc_shm_layout_descriptor structures have changed.
+ * Versioning within each region is transferred
+ * via the min_compatible_version and current_version fields in
+ * vsoc_device_region. The driver does not consult these fields: they are left
+ * for the HALs and host processes and will change independently of the layout
+ * version.
+ */
+#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2
+#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0
+
+#define VSOC_CREATE_FD_SCOPED_PERMISSION \
+ _IOW(0xF5, 0, struct fd_scoped_permission)
+#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt if one is not already
+ * in flight.
+ */
+#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2)
+
+/*
+ * When this returns the guest will scan host_to_guest_signal_table to
+ * check for new futexes to wake.
+ */
+/* TODO(ghartman): Consider moving this to the bottom half */
+#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3)
+
+/*
+ * Guest HALs will use this to retrieve the region description after
+ * opening their device node.
+ */
+#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region)
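+
+/*
+ * Illustrative sketch (not part of the ABI): a guest HAL would typically open
+ * its minor node, describe the region, and then mmap() it. The device path is
+ * an assumption; node names come from the device_name of each region.
+ *
+ *	struct vsoc_device_region desc;
+ *	int fd = open("/dev/hwcomposer", O_RDWR);	(name is an example)
+ *
+ *	if (fd >= 0 && ioctl(fd, VSOC_DESCRIBE_REGION, &desc) == 0)
+ *		mmap(NULL, desc.region_end_offset - desc.region_begin_offset,
+ *		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ */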
+
+/*
+ * Wake any threads that may be waiting for a host interrupt on this region.
+ * This is mostly used during shutdown.
+ */
+#define VSOC_SELF_INTERRUPT _IO(0xF5, 5)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt unconditionally.
+ */
+#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6)
+
+enum wait_types {
+ VSOC_WAIT_UNDEFINED = 0,
+ VSOC_WAIT_IF_EQUAL = 1,
+ VSOC_WAIT_IF_EQUAL_TIMEOUT = 2
+};
+
+/*
+ * Wait for a condition to be true
+ *
+ * Note, this is sized and aligned so the 32 bit and 64 bit layouts are
+ * identical.
+ */
+struct vsoc_cond_wait {
+ /* Input: Offset of the 32 bit word to check */
+ __u32 offset;
+	/* Input: Value to compare against the word at offset */
+ __u32 value;
+	/* Input: Monotonic time to wake at, in seconds */
+ __u64 wake_time_sec;
+	/* Input: Nanoseconds portion of the monotonic wake time */
+ __u32 wake_time_nsec;
+ /* Input: Type of wait */
+ __u32 wait_type;
+ /* Output: Number of times the thread woke before returning. */
+ __u32 wakes;
+	/* Ensure that the structure is 8-byte aligned and its size is a
+	 * multiple of 8 bytes for 32/64 bit compatibility.
+ */
+ __u32 reserved_1;
+};
+
+#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait)
+
+/* Wake any local threads waiting at the offset given in arg */
+#define VSOC_COND_WAKE _IO(0xF5, 8)
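+
+/*
+ * Illustrative sketch (not part of the ABI): a futex-style wait/wake pair
+ * built on the two ioctls above. region_fd, word_off and old_val are
+ * assumptions for this example; the vsoc_cond_wait layout is the one defined
+ * earlier in this header.
+ *
+ *	struct vsoc_cond_wait w = {
+ *		.offset    = word_off,		(4-byte aligned, inside region)
+ *		.value     = old_val,		(sleep while the word == old_val)
+ *		.wait_type = VSOC_WAIT_IF_EQUAL,
+ *	};
+ *	int ret = ioctl(region_fd, VSOC_COND_WAIT, &w);
+ *	(on return, w.wakes counts wake-ups seen before the word changed)
+ *
+ *	(in the thread that changed the word:)
+ *	ioctl(region_fd, VSOC_COND_WAKE, (unsigned long)word_off);
+ */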
+
+#endif /* _UAPI_LINUX_VSOC_SHM_H */
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
new file mode 100644
index 000000000000..954ed2c5d807
--- /dev/null
+++ b/drivers/staging/android/vsoc.c
@@ -0,0 +1,1165 @@
+/*
+ * drivers/staging/android/vsoc.c
+ *
+ * Android Virtual System on a Chip (VSoC) driver
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Author: ghartman@google.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
+ * Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca>
+ *
+ * Based on cirrusfb.c and 8139cp.c:
+ * Copyright 1999-2001 Jeff Garzik
+ * Copyright 2001-2004 Jeff Garzik
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/futex.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include "uapi/vsoc_shm.h"
+
+#define VSOC_DEV_NAME "vsoc"
+
+/*
+ * Description of the ivshmem-doorbell PCI device used by QEmu. These
+ * constants follow docs/specs/ivshmem-spec.txt, which can be found in
+ * the QEmu repository. This was last reconciled with the version that
+ * shipped with QEmu 2.8.
+ */
+
+/*
+ * These constants are the KVM Inter-VM shared memory device
+ * register offsets.
+ */
+enum {
+ INTR_MASK = 0x00, /* Interrupt Mask */
+ INTR_STATUS = 0x04, /* Interrupt Status */
+ IV_POSITION = 0x08, /* VM ID */
+ DOORBELL = 0x0c, /* Doorbell */
+};
+
+static const int REGISTER_BAR; /* Equal to 0 */
+static const int MAX_REGISTER_BAR_LEN = 0x100;
+/*
+ * The MSI-x BAR is not used directly.
+ *
+ * static const int MSI_X_BAR = 1;
+ */
+static const int SHARED_MEMORY_BAR = 2;
+
+struct vsoc_region_data {
+ char name[VSOC_DEVICE_NAME_SZ + 1];
+ wait_queue_head_t interrupt_wait_queue;
+ /* TODO(b/73664181): Use multiple futex wait queues */
+ wait_queue_head_t futex_wait_queue;
+ /* Flag indicating that an interrupt has been signalled by the host. */
+ atomic_t *incoming_signalled;
+ /* Flag indicating the guest has signalled the host. */
+ atomic_t *outgoing_signalled;
+ bool irq_requested;
+ bool device_created;
+};
+
+struct vsoc_device {
+ /* Kernel virtual address of REGISTER_BAR. */
+ void __iomem *regs;
+ /* Physical address of SHARED_MEMORY_BAR. */
+ phys_addr_t shm_phys_start;
+ /* Kernel virtual address of SHARED_MEMORY_BAR. */
+ void __iomem *kernel_mapped_shm;
+ /* Size of the entire shared memory window in bytes. */
+ size_t shm_size;
+ /*
+ * Pointer to the virtual address of the shared memory layout structure.
+ * This is probably identical to kernel_mapped_shm, but saving this
+ * here saves a lot of annoying casts.
+ */
+ struct vsoc_shm_layout_descriptor *layout;
+ /*
+ * Points to a table of region descriptors in the kernel's virtual
+ * address space. Calculated from
+ * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
+ */
+ struct vsoc_device_region *regions;
+ /* Head of a list of permissions that have been granted. */
+ struct list_head permissions;
+ struct pci_dev *dev;
+ /* Per-region (and therefore per-interrupt) information. */
+ struct vsoc_region_data *regions_data;
+ /*
+ * Table of msi-x entries. This has to be separated from struct
+ * vsoc_region_data because the kernel deals with them as an array.
+ */
+ struct msix_entry *msix_entries;
+	/* Mutex that protects the permission list */
+ struct mutex mtx;
+ /* Major number assigned by the kernel */
+ int major;
+ /* Character device assigned by the kernel */
+ struct cdev cdev;
+ /* Device class assigned by the kernel */
+ struct class *class;
+ /*
+ * Flags that indicate what we've initialized. These are used to do an
+ * orderly cleanup of the device.
+ */
+ bool enabled_device;
+ bool requested_regions;
+ bool cdev_added;
+ bool class_added;
+ bool msix_enabled;
+};
+
+static struct vsoc_device vsoc_dev;
+
+/*
+ * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
+ */
+
+struct fd_scoped_permission_node {
+ struct fd_scoped_permission permission;
+ struct list_head list;
+};
+
+struct vsoc_private_data {
+ struct fd_scoped_permission_node *fd_scoped_permission_node;
+};
+
+static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
+static int vsoc_mmap(struct file *, struct vm_area_struct *);
+static int vsoc_open(struct inode *, struct file *);
+static int vsoc_release(struct inode *, struct file *);
+static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
+static int do_create_fd_scoped_permission(
+ struct vsoc_device_region *region_p,
+ struct fd_scoped_permission_node *np,
+ struct fd_scoped_permission_arg __user *arg);
+static void do_destroy_fd_scoped_permission(
+ struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission *perm);
+static long do_vsoc_describe_region(struct file *,
+ struct vsoc_device_region __user *);
+static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);
+
+/**
+ * Validate arguments on entry points to the driver.
+ */
+inline int vsoc_validate_inode(struct inode *inode)
+{
+ if (iminor(inode) >= vsoc_dev.layout->region_count) {
+ dev_err(&vsoc_dev.dev->dev,
+ "describe_region: invalid region %d\n", iminor(inode));
+ return -ENODEV;
+ }
+ return 0;
+}
+
+inline int vsoc_validate_filep(struct file *filp)
+{
+ int ret = vsoc_validate_inode(file_inode(filp));
+
+ if (ret)
+ return ret;
+ if (!filp->private_data) {
+ dev_err(&vsoc_dev.dev->dev,
+ "No private data on fd, region %d\n",
+ iminor(file_inode(filp)));
+ return -EBADFD;
+ }
+ return 0;
+}
+
+/* Converts from shared memory offset to virtual address */
+static inline void *shm_off_to_virtual_addr(__u32 offset)
+{
+ return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
+}
+
+/* Converts from shared memory offset to physical address */
+static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
+{
+ return vsoc_dev.shm_phys_start + offset;
+}
+
+/**
+ * Convenience functions to obtain the region from the inode or file.
+ * Dangerous to call before validating the inode/file.
+ */
+static inline struct vsoc_device_region *vsoc_region_from_inode(
+ struct inode *inode)
+{
+ return &vsoc_dev.regions[iminor(inode)];
+}
+
+static inline struct vsoc_device_region *vsoc_region_from_filep(
+ struct file *inode)
+{
+ return vsoc_region_from_inode(file_inode(inode));
+}
+
+static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
+{
+ return r->region_end_offset - r->region_begin_offset;
+}
+
+static const struct file_operations vsoc_ops = {
+ .owner = THIS_MODULE,
+ .open = vsoc_open,
+ .mmap = vsoc_mmap,
+ .read = vsoc_read,
+ .unlocked_ioctl = vsoc_ioctl,
+ .compat_ioctl = vsoc_ioctl,
+ .write = vsoc_write,
+ .llseek = vsoc_lseek,
+ .release = vsoc_release,
+};
+
+static struct pci_device_id vsoc_id_table[] = {
+ {0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0},
+};
+
+MODULE_DEVICE_TABLE(pci, vsoc_id_table);
+
+static void vsoc_remove_device(struct pci_dev *pdev);
+static int vsoc_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static struct pci_driver vsoc_pci_driver = {
+ .name = "vsoc",
+ .id_table = vsoc_id_table,
+ .probe = vsoc_probe_device,
+ .remove = vsoc_remove_device,
+};
+
+static int do_create_fd_scoped_permission(
+ struct vsoc_device_region *region_p,
+ struct fd_scoped_permission_node *np,
+ struct fd_scoped_permission_arg __user *arg)
+{
+ struct file *managed_filp;
+ s32 managed_fd;
+ atomic_t *owner_ptr = NULL;
+ struct vsoc_device_region *managed_region_p;
+
+	if (copy_from_user(&np->permission, &arg->perm,
+			   sizeof(np->permission)) ||
+ copy_from_user(&managed_fd,
+ &arg->managed_region_fd, sizeof(managed_fd))) {
+ return -EFAULT;
+ }
+ managed_filp = fdget(managed_fd).file;
+	/* Check that it's a valid fd. */
+ if (!managed_filp || vsoc_validate_filep(managed_filp))
+ return -EPERM;
+ /* EEXIST if the given fd already has a permission. */
+ if (((struct vsoc_private_data *)managed_filp->private_data)->
+ fd_scoped_permission_node)
+ return -EEXIST;
+ managed_region_p = vsoc_region_from_filep(managed_filp);
+ /* Check that the provided region is managed by this one */
+ if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
+ return -EPERM;
+ /* The area must be well formed and have non-zero size */
+ if (np->permission.begin_offset >= np->permission.end_offset)
+ return -EINVAL;
+ /* The area must fit in the memory window */
+ if (np->permission.end_offset >
+ vsoc_device_region_size(managed_region_p))
+ return -ERANGE;
+ /* The area must be in the region data section */
+ if (np->permission.begin_offset <
+ managed_region_p->offset_of_region_data)
+ return -ERANGE;
+ /* The area must be page aligned */
+ if (!PAGE_ALIGNED(np->permission.begin_offset) ||
+ !PAGE_ALIGNED(np->permission.end_offset))
+ return -EINVAL;
+ /* Owner offset must be naturally aligned in the window */
+ if (np->permission.owner_offset &
+ (sizeof(np->permission.owner_offset) - 1))
+ return -EINVAL;
+ /* The owner flag must reside in the owner memory */
+ if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
+ vsoc_device_region_size(region_p))
+ return -ERANGE;
+ /* The owner flag must reside in the data section */
+ if (np->permission.owner_offset < region_p->offset_of_region_data)
+ return -EINVAL;
+ /* The owner value must change to claim the memory */
+ if (np->permission.owned_value == VSOC_REGION_FREE)
+ return -EINVAL;
+ owner_ptr =
+ (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
+ np->permission.owner_offset);
+ /* We've already verified that this is in the shared memory window, so
+ * it should be safe to write to this address.
+ */
+ if (atomic_cmpxchg(owner_ptr,
+ VSOC_REGION_FREE,
+ np->permission.owned_value) != VSOC_REGION_FREE) {
+ return -EBUSY;
+ }
+ ((struct vsoc_private_data *)managed_filp->private_data)->
+ fd_scoped_permission_node = np;
+ /* The file offset needs to be adjusted if the calling
+ * process did any read/write operations on the fd
+ * before creating the permission.
+ */
+ if (managed_filp->f_pos) {
+ if (managed_filp->f_pos > np->permission.end_offset) {
+ /* If the offset is beyond the permission end, set it
+ * to the end.
+ */
+ managed_filp->f_pos = np->permission.end_offset;
+ } else {
+			/* If the offset is within the permission interval
+			 * make it relative to its start, otherwise reset it
+			 * to zero.
+ */
+ if (managed_filp->f_pos < np->permission.begin_offset) {
+ managed_filp->f_pos = 0;
+ } else {
+ managed_filp->f_pos -=
+ np->permission.begin_offset;
+ }
+ }
+ }
+ return 0;
+}
+
+static void do_destroy_fd_scoped_permission_node(
+ struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission_node *node)
+{
+ if (node) {
+ do_destroy_fd_scoped_permission(owner_region_p,
+ &node->permission);
+ mutex_lock(&vsoc_dev.mtx);
+ list_del(&node->list);
+ mutex_unlock(&vsoc_dev.mtx);
+ kfree(node);
+ }
+}
+
+static void do_destroy_fd_scoped_permission(
+ struct vsoc_device_region *owner_region_p,
+ struct fd_scoped_permission *perm)
+{
+ atomic_t *owner_ptr = NULL;
+ int prev = 0;
+
+ if (!perm)
+ return;
+ owner_ptr = (atomic_t *)shm_off_to_virtual_addr(
+ owner_region_p->region_begin_offset + perm->owner_offset);
+ prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
+ if (prev != perm->owned_value)
+ dev_err(&vsoc_dev.dev->dev,
+ "%x-%x: owner (%s) %x: expected to be %x was %x",
+ perm->begin_offset, perm->end_offset,
+ owner_region_p->device_name, perm->owner_offset,
+ perm->owned_value, prev);
+}
+
+static long do_vsoc_describe_region(struct file *filp,
+ struct vsoc_device_region __user *dest)
+{
+ struct vsoc_device_region *region_p;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ region_p = vsoc_region_from_filep(filp);
+ if (copy_to_user(dest, region_p, sizeof(*region_p)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Implements the inner logic of cond_wait. Copies to and from userspace are
+ * done in the helper function below.
+ */
+static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
+{
+ DEFINE_WAIT(wait);
+ u32 region_number = iminor(file_inode(filp));
+ struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+ struct hrtimer_sleeper timeout, *to = NULL;
+ int ret = 0;
+ struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+ atomic_t *address = NULL;
+ struct timespec ts;
+
+ /* Ensure that the offset is aligned */
+ if (arg->offset & (sizeof(uint32_t) - 1))
+ return -EADDRNOTAVAIL;
+ /* Ensure that the offset is within shared memory */
+ if (((uint64_t)arg->offset) + region_p->region_begin_offset +
+ sizeof(uint32_t) > region_p->region_end_offset)
+ return -E2BIG;
+ address = shm_off_to_virtual_addr(region_p->region_begin_offset +
+ arg->offset);
+
+ /* Ensure that the type of wait is valid */
+ switch (arg->wait_type) {
+ case VSOC_WAIT_IF_EQUAL:
+ break;
+ case VSOC_WAIT_IF_EQUAL_TIMEOUT:
+ to = &timeout;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (to) {
+		/* Copy the user-supplied timespec into the kernel structure.
+ * We do things this way to flatten differences between 32 bit
+ * and 64 bit timespecs.
+ */
+ ts.tv_sec = arg->wake_time_sec;
+ ts.tv_nsec = arg->wake_time_nsec;
+
+ if (!timespec_valid(&ts))
+ return -EINVAL;
+ hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
+ hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts),
+ current->timer_slack_ns);
+
+ hrtimer_init_sleeper(to, current);
+ }
+
+ while (1) {
+ prepare_to_wait(&data->futex_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ /*
+ * Check the sentinel value after prepare_to_wait. If the value
+ * changes after this check the writer will call signal,
+ * changing the task state from INTERRUPTIBLE to RUNNING. That
+ * will ensure that schedule() will eventually schedule this
+ * task.
+ */
+ if (atomic_read(address) != arg->value) {
+ ret = 0;
+ break;
+ }
+ if (to) {
+ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+ if (likely(to->task))
+ freezable_schedule();
+ hrtimer_cancel(&to->timer);
+ if (!to->task) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ } else {
+ freezable_schedule();
+ }
+ /* Count the number of times that we woke up. This is useful
+ * for unit testing.
+ */
+ ++arg->wakes;
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ }
+ finish_wait(&data->futex_wait_queue, &wait);
+ if (to)
+ destroy_hrtimer_on_stack(&to->timer);
+ return ret;
+}
+
+/**
+ * Handles the details of copying from/to userspace to ensure that the copies
+ * happen on all of the return paths of cond_wait.
+ */
+static int do_vsoc_cond_wait(struct file *filp,
+ struct vsoc_cond_wait __user *untrusted_in)
+{
+ struct vsoc_cond_wait arg;
+ int rval = 0;
+
+ if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
+ return -EFAULT;
+ /* wakes is an out parameter. Initialize it to something sensible. */
+ arg.wakes = 0;
+ rval = handle_vsoc_cond_wait(filp, &arg);
+ if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
+ return -EFAULT;
+ return rval;
+}
+
+static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
+{
+ struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
+ u32 region_number = iminor(file_inode(filp));
+ struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
+ /* Ensure that the offset is aligned */
+ if (offset & (sizeof(uint32_t) - 1))
+ return -EADDRNOTAVAIL;
+ /* Ensure that the offset is within shared memory */
+ if (((uint64_t)offset) + region_p->region_begin_offset +
+ sizeof(uint32_t) > region_p->region_end_offset)
+ return -E2BIG;
+ /*
+ * TODO(b/73664181): Use multiple futex wait queues.
+ * We need to wake every sleeper when the condition changes. Typically
+ * only a single thread will be waiting on the condition, but there
+ * are exceptions. The worst case is about 10 threads.
+ */
+ wake_up_interruptible_all(&data->futex_wait_queue);
+ return 0;
+}
+
+static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ struct vsoc_device_region *region_p;
+ u32 reg_num;
+ struct vsoc_region_data *reg_data;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ region_p = vsoc_region_from_filep(filp);
+ reg_num = iminor(file_inode(filp));
+ reg_data = vsoc_dev.regions_data + reg_num;
+ switch (cmd) {
+ case VSOC_CREATE_FD_SCOPED_PERMISSION:
+ {
+ struct fd_scoped_permission_node *node = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ /* We can't allocate memory for the permission */
+ if (!node)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&node->list);
+ rv = do_create_fd_scoped_permission(
+ region_p,
+ node,
+ (struct fd_scoped_permission_arg __user *)arg);
+ if (!rv) {
+ mutex_lock(&vsoc_dev.mtx);
+ list_add(&node->list, &vsoc_dev.permissions);
+ mutex_unlock(&vsoc_dev.mtx);
+ } else {
+ kfree(node);
+ return rv;
+ }
+ }
+ break;
+
+ case VSOC_GET_FD_SCOPED_PERMISSION:
+ {
+ struct fd_scoped_permission_node *node =
+ ((struct vsoc_private_data *)filp->private_data)->
+ fd_scoped_permission_node;
+ if (!node)
+ return -ENOENT;
+ if (copy_to_user
+ ((struct fd_scoped_permission __user *)arg,
+ &node->permission, sizeof(node->permission)))
+ return -EFAULT;
+ }
+ break;
+
+ case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
+ if (!atomic_xchg(
+ reg_data->outgoing_signalled,
+ 1)) {
+ writel(reg_num, vsoc_dev.regs + DOORBELL);
+ return 0;
+ } else {
+ return -EBUSY;
+ }
+ break;
+
+ case VSOC_SEND_INTERRUPT_TO_HOST:
+ writel(reg_num, vsoc_dev.regs + DOORBELL);
+ return 0;
+
+ case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
+ wait_event_interruptible(
+ reg_data->interrupt_wait_queue,
+ (atomic_read(reg_data->incoming_signalled) != 0));
+ break;
+
+ case VSOC_DESCRIBE_REGION:
+ return do_vsoc_describe_region(
+ filp,
+ (struct vsoc_device_region __user *)arg);
+
+ case VSOC_SELF_INTERRUPT:
+ atomic_set(reg_data->incoming_signalled, 1);
+ wake_up_interruptible(&reg_data->interrupt_wait_queue);
+ break;
+
+ case VSOC_COND_WAIT:
+ return do_vsoc_cond_wait(filp,
+ (struct vsoc_cond_wait __user *)arg);
+ case VSOC_COND_WAKE:
+ return do_vsoc_cond_wake(filp, arg);
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
+ loff_t *poffset)
+{
+ __u32 area_off;
+ const void *area_p;
+ ssize_t area_len;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, &area_off);
+ area_p = shm_off_to_virtual_addr(area_off);
+ area_p += *poffset;
+ area_len -= *poffset;
+ if (area_len <= 0)
+ return 0;
+ if (area_len < len)
+ len = area_len;
+ if (copy_to_user(buffer, area_p, len))
+ return -EFAULT;
+ *poffset += len;
+ return len;
+}
+
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
+{
+ ssize_t area_len = 0;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, NULL);
+ switch (origin) {
+ case SEEK_SET:
+ break;
+
+ case SEEK_CUR:
+ if (offset > 0 && offset + filp->f_pos < 0)
+ return -EOVERFLOW;
+ offset += filp->f_pos;
+ break;
+
+ case SEEK_END:
+ if (offset > 0 && offset + area_len < 0)
+ return -EOVERFLOW;
+ offset += area_len;
+ break;
+
+ case SEEK_DATA:
+ if (offset >= area_len)
+ return -EINVAL;
+ if (offset < 0)
+ offset = 0;
+ break;
+
+ case SEEK_HOLE:
+ /* Next hole is always the end of the region, unless offset is
+ * beyond that
+ */
+ if (offset < area_len)
+ offset = area_len;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (offset < 0 || offset > area_len)
+ return -EINVAL;
+ filp->f_pos = offset;
+
+ return offset;
+}
+
+static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
+ size_t len, loff_t *poffset)
+{
+ __u32 area_off;
+ void *area_p;
+ ssize_t area_len;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, &area_off);
+ area_p = shm_off_to_virtual_addr(area_off);
+ area_p += *poffset;
+ area_len -= *poffset;
+ if (area_len <= 0)
+ return 0;
+ if (area_len < len)
+ len = area_len;
+ if (copy_from_user(area_p, buffer, len))
+ return -EFAULT;
+ *poffset += len;
+ return len;
+}
+
+static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
+{
+ struct vsoc_region_data *region_data =
+ (struct vsoc_region_data *)region_data_v;
+ int reg_num = region_data - vsoc_dev.regions_data;
+
+ if (unlikely(!region_data))
+ return IRQ_NONE;
+
+ if (unlikely(reg_num < 0 ||
+ reg_num >= vsoc_dev.layout->region_count)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "invalid irq @%p reg_num=0x%04x\n",
+ region_data, reg_num);
+ return IRQ_NONE;
+ }
+ if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "irq not aligned @%p reg_num=0x%04x\n",
+ region_data, reg_num);
+ return IRQ_NONE;
+ }
+ wake_up_interruptible(&region_data->interrupt_wait_queue);
+ return IRQ_HANDLED;
+}
+
+static int vsoc_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int result;
+ int i;
+ resource_size_t reg_size;
+ dev_t devt;
+
+ vsoc_dev.dev = pdev;
+ result = pci_enable_device(pdev);
+ if (result) {
+ dev_err(&pdev->dev,
+ "pci_enable_device failed %s: error %d\n",
+ pci_name(pdev), result);
+ return result;
+ }
+ vsoc_dev.enabled_device = true;
+ result = pci_request_regions(pdev, "vsoc");
+ if (result < 0) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.requested_regions = true;
+ /* Set up the control registers in BAR 0 */
+ reg_size = pci_resource_len(pdev, REGISTER_BAR);
+ if (reg_size > MAX_REGISTER_BAR_LEN)
+ vsoc_dev.regs =
+ pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
+ else
+ vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);
+
+ if (!vsoc_dev.regs) {
+ dev_err(&pdev->dev,
+ "cannot map registers of size %zu\n",
+ (size_t)reg_size);
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+
+ /* Map the shared memory in BAR 2 */
+ vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
+ vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
+
+ dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
+ &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+ vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
+ if (!vsoc_dev.kernel_mapped_shm) {
+ dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+
+ vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
+ vsoc_dev.kernel_mapped_shm;
+ dev_info(&pdev->dev, "major_version: %d\n",
+ vsoc_dev.layout->major_version);
+ dev_info(&pdev->dev, "minor_version: %d\n",
+ vsoc_dev.layout->minor_version);
+ dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
+ dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
+ if (vsoc_dev.layout->major_version !=
+ CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
+ dev_err(&vsoc_dev.dev->dev,
+ "driver supports only major_version %d\n",
+ CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
+ VSOC_DEV_NAME);
+ if (result) {
+ dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.major = MAJOR(devt);
+ cdev_init(&vsoc_dev.cdev, &vsoc_ops);
+ vsoc_dev.cdev.owner = THIS_MODULE;
+ result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
+ if (result) {
+ dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.cdev_added = true;
+ vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
+ if (IS_ERR(vsoc_dev.class)) {
+ dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
+ vsoc_remove_device(pdev);
+ return PTR_ERR(vsoc_dev.class);
+ }
+ vsoc_dev.class_added = true;
+ vsoc_dev.regions = (struct vsoc_device_region __force *)
+ ((void *)vsoc_dev.layout +
+ vsoc_dev.layout->vsoc_region_desc_offset);
+ vsoc_dev.msix_entries = kcalloc(
+ vsoc_dev.layout->region_count,
+ sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
+ if (!vsoc_dev.msix_entries) {
+ dev_err(&vsoc_dev.dev->dev,
+ "unable to allocate msix_entries\n");
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ vsoc_dev.regions_data = kcalloc(
+ vsoc_dev.layout->region_count,
+ sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
+ if (!vsoc_dev.regions_data) {
+ dev_err(&vsoc_dev.dev->dev,
+ "unable to allocate regions' data\n");
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i)
+ vsoc_dev.msix_entries[i].entry = i;
+
+ result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
+ vsoc_dev.layout->region_count);
+ if (result) {
+ dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ /* Check that all regions are well formed */
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ const struct vsoc_device_region *region = vsoc_dev.regions + i;
+
+ if (!PAGE_ALIGNED(region->region_begin_offset) ||
+ !PAGE_ALIGNED(region->region_end_offset)) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d not aligned (%x:%x)", i,
+ region->region_begin_offset,
+ region->region_end_offset);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ if (region->region_begin_offset >= region->region_end_offset ||
+ region->region_end_offset > vsoc_dev.shm_size) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d offsets are wrong: %x %x %zx",
+ i, region->region_begin_offset,
+ region->region_end_offset, vsoc_dev.shm_size);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ if (region->managed_by >= vsoc_dev.layout->region_count) {
+ dev_err(&vsoc_dev.dev->dev,
+ "region %d has invalid owner: %u",
+ i, region->managed_by);
+ vsoc_remove_device(pdev);
+ return -EFAULT;
+ }
+ }
+ vsoc_dev.msix_enabled = true;
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ const struct vsoc_device_region *region = vsoc_dev.regions + i;
+ size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
+ const struct vsoc_signal_table_layout *h_to_g_signal_table =
+ &region->host_to_guest_signal_table;
+ const struct vsoc_signal_table_layout *g_to_h_signal_table =
+ &region->guest_to_host_signal_table;
+
+ vsoc_dev.regions_data[i].name[name_sz] = '\0';
+ memcpy(vsoc_dev.regions_data[i].name, region->device_name,
+ name_sz);
+ dev_info(&pdev->dev, "region %d name=%s\n",
+ i, vsoc_dev.regions_data[i].name);
+ init_waitqueue_head(
+ &vsoc_dev.regions_data[i].interrupt_wait_queue);
+ init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
+ vsoc_dev.regions_data[i].incoming_signalled =
+ shm_off_to_virtual_addr(region->region_begin_offset) +
+ h_to_g_signal_table->interrupt_signalled_offset;
+ vsoc_dev.regions_data[i].outgoing_signalled =
+ shm_off_to_virtual_addr(region->region_begin_offset) +
+ g_to_h_signal_table->interrupt_signalled_offset;
+ result = request_irq(
+ vsoc_dev.msix_entries[i].vector,
+ vsoc_interrupt, 0,
+ vsoc_dev.regions_data[i].name,
+ vsoc_dev.regions_data + i);
+ if (result) {
+ dev_info(&pdev->dev,
+ "request_irq failed irq=%d vector=%d\n",
+ i, vsoc_dev.msix_entries[i].vector);
+ vsoc_remove_device(pdev);
+ return -ENOSPC;
+ }
+ vsoc_dev.regions_data[i].irq_requested = true;
+		if (IS_ERR(device_create(vsoc_dev.class, NULL,
+					 MKDEV(vsoc_dev.major, i),
+					 NULL,
+					 vsoc_dev.regions_data[i].name))) {
+ dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
+ vsoc_remove_device(pdev);
+ return -EBUSY;
+ }
+ vsoc_dev.regions_data[i].device_created = true;
+ }
+ return 0;
+}
+
+/*
+ * This should undo all of the allocations in the probe function in reverse
+ * order.
+ *
+ * Notes:
+ *
+ * The device may have been partially initialized, so double check
+ * that the allocations happened.
+ *
+ * This function may be called multiple times, so mark resources as freed
+ * as they are deallocated.
+ */
+static void vsoc_remove_device(struct pci_dev *pdev)
+{
+ int i;
+ /*
+ * pdev is the first thing to be set on probe and the last thing
+ * to be cleared here. If it's NULL then there is no cleanup.
+ */
+ if (!pdev || !vsoc_dev.dev)
+ return;
+ dev_info(&pdev->dev, "remove_device\n");
+ if (vsoc_dev.regions_data) {
+ for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+ if (vsoc_dev.regions_data[i].device_created) {
+ device_destroy(vsoc_dev.class,
+ MKDEV(vsoc_dev.major, i));
+ vsoc_dev.regions_data[i].device_created = false;
+ }
+ if (vsoc_dev.regions_data[i].irq_requested)
+				free_irq(vsoc_dev.msix_entries[i].vector,
+					 vsoc_dev.regions_data + i);
+ vsoc_dev.regions_data[i].irq_requested = false;
+ }
+ kfree(vsoc_dev.regions_data);
+ vsoc_dev.regions_data = NULL;
+ }
+ if (vsoc_dev.msix_enabled) {
+ pci_disable_msix(pdev);
+ vsoc_dev.msix_enabled = false;
+ }
+ kfree(vsoc_dev.msix_entries);
+ vsoc_dev.msix_entries = NULL;
+ vsoc_dev.regions = NULL;
+ if (vsoc_dev.class_added) {
+ class_destroy(vsoc_dev.class);
+ vsoc_dev.class_added = false;
+ }
+ if (vsoc_dev.cdev_added) {
+ cdev_del(&vsoc_dev.cdev);
+ vsoc_dev.cdev_added = false;
+ }
+ if (vsoc_dev.major && vsoc_dev.layout) {
+ unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
+ vsoc_dev.layout->region_count);
+ vsoc_dev.major = 0;
+ }
+ vsoc_dev.layout = NULL;
+ if (vsoc_dev.kernel_mapped_shm) {
+ pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
+ vsoc_dev.kernel_mapped_shm = NULL;
+ }
+ if (vsoc_dev.regs) {
+ pci_iounmap(pdev, vsoc_dev.regs);
+ vsoc_dev.regs = NULL;
+ }
+ if (vsoc_dev.requested_regions) {
+ pci_release_regions(pdev);
+ vsoc_dev.requested_regions = false;
+ }
+ if (vsoc_dev.enabled_device) {
+ pci_disable_device(pdev);
+ vsoc_dev.enabled_device = false;
+ }
+ /* Do this last: it indicates that the device is not initialized. */
+ vsoc_dev.dev = NULL;
+}
+
+static void __exit vsoc_cleanup_module(void)
+{
+ vsoc_remove_device(vsoc_dev.dev);
+ pci_unregister_driver(&vsoc_pci_driver);
+}
+
+static int __init vsoc_init_module(void)
+{
+ int err = -ENOMEM;
+
+ INIT_LIST_HEAD(&vsoc_dev.permissions);
+ mutex_init(&vsoc_dev.mtx);
+
+ err = pci_register_driver(&vsoc_pci_driver);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int vsoc_open(struct inode *inode, struct file *filp)
+{
+ /* Can't use vsoc_validate_filep because filp is still incomplete */
+ int ret = vsoc_validate_inode(inode);
+
+ if (ret)
+ return ret;
+ filp->private_data =
+ kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
+ if (!filp->private_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static int vsoc_release(struct inode *inode, struct file *filp)
+{
+ struct vsoc_private_data *private_data = NULL;
+ struct fd_scoped_permission_node *node = NULL;
+ struct vsoc_device_region *owner_region_p = NULL;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ private_data = (struct vsoc_private_data *)filp->private_data;
+ if (!private_data)
+ return 0;
+
+ node = private_data->fd_scoped_permission_node;
+ if (node) {
+ owner_region_p = vsoc_region_from_inode(inode);
+ if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
+ owner_region_p =
+ &vsoc_dev.regions[owner_region_p->managed_by];
+ }
+ do_destroy_fd_scoped_permission_node(owner_region_p, node);
+ private_data->fd_scoped_permission_node = NULL;
+ }
+ kfree(private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * Returns the device relative offset and length of the area specified by the
+ * fd scoped permission. If there is no fd scoped permission set, a default
+ * permission covering the entire region is assumed, unless the region is owned
+ * by another one, in which case the default is a permission with zero size.
+ */
+static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
+{
+ __u32 off = 0;
+ ssize_t length = 0;
+ struct vsoc_device_region *region_p;
+	struct fd_scoped_permission_node *node;
+
+	region_p = vsoc_region_from_filep(filp);
+	off = region_p->region_begin_offset;
+	node = ((struct vsoc_private_data *)filp->private_data)->
+		fd_scoped_permission_node;
+	if (node) {
+		off += node->permission.begin_offset;
+		length = node->permission.end_offset -
+			 node->permission.begin_offset;
+ } else if (region_p->managed_by == VSOC_REGION_WHOLE) {
+		/* No permission set and the region is not owned by another,
+ * default to full region access.
+ */
+ length = vsoc_device_region_size(region_p);
+ } else {
+ /* return zero length, access is denied. */
+ length = 0;
+ }
+ if (area_offset)
+ *area_offset = off;
+ return length;
+}
+
+static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long len = vma->vm_end - vma->vm_start;
+ __u32 area_off;
+ phys_addr_t mem_off;
+ ssize_t area_len;
+ int retval = vsoc_validate_filep(filp);
+
+ if (retval)
+ return retval;
+ area_len = vsoc_get_area(filp, &area_off);
+ /* Add the requested offset */
+ area_off += (vma->vm_pgoff << PAGE_SHIFT);
+ area_len -= (vma->vm_pgoff << PAGE_SHIFT);
+ if (area_len < len)
+ return -EINVAL;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ mem_off = shm_off_to_phys_addr(area_off);
+ if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
+ len, vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
+module_init(vsoc_init_module);
+module_exit(vsoc_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Hartman <ghartman@google.com>");
+MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device");
+MODULE_VERSION("1.0");
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index b63dd2ef78b5..1f398d06f4ee 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
struct comedi_cmd *cmd = &async->cmd;
if (cmd->stop_src == TRIG_COUNT) {
- unsigned int nscans = nsamples / cmd->scan_end_arg;
- unsigned int scans_left = __comedi_nscans_left(s, nscans);
+ unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
unsigned int scan_pos =
comedi_bytes_to_samples(s, async->scan_progress);
unsigned long long samples_left = 0;
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index c975f6e8be49..8f181caffca3 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1348,6 +1348,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
ack |= NISTC_INTA_ACK_AI_START;
if (a_status & NISTC_AI_STATUS1_STOP)
ack |= NISTC_INTA_ACK_AI_STOP;
+ if (a_status & NISTC_AI_STATUS1_OVER)
+ ack |= NISTC_INTA_ACK_AI_ERR;
if (ack)
ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
}
diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig
index c579141a7bed..c8871d0c0776 100644
--- a/drivers/staging/goldfish/Kconfig
+++ b/drivers/staging/goldfish/Kconfig
@@ -10,10 +10,3 @@ config GOLDFISH_SYNC
---help---
Emulated sync fences for the Goldfish Android Virtual Device
-config MTD_GOLDFISH_NAND
- tristate "Goldfish NAND device"
- depends on GOLDFISH
- depends on MTD
- help
- Drives the emulated NAND flash device on the Google Goldfish
- Android virtual device.
diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile
index 0cf525588210..30db49141814 100644
--- a/drivers/staging/goldfish/Makefile
+++ b/drivers/staging/goldfish/Makefile
@@ -3,7 +3,6 @@
#
obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o
-obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o
# and sync
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
deleted file mode 100644
index 623353db5a08..000000000000
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * drivers/mtd/devices/goldfish_nand.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (C) 2012 Intel, Inc.
- * Copyright (C) 2013 Intel, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/goldfish.h>
-#include <asm/div64.h>
-
-#include "goldfish_nand_reg.h"
-
-struct goldfish_nand {
- /* lock protects access to the device registers */
- struct mutex lock;
- unsigned char __iomem *base;
- struct cmd_params *cmd_params;
- size_t mtd_count;
- struct mtd_info mtd[0];
-};
-
-static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
- enum nand_cmd cmd, u64 addr, u32 len,
- void *ptr, u32 *rv)
-{
- u32 cmdp;
- struct goldfish_nand *nand = mtd->priv;
- struct cmd_params *cps = nand->cmd_params;
- unsigned char __iomem *base = nand->base;
-
- if (!cps)
- return -1;
-
- switch (cmd) {
- case NAND_CMD_ERASE:
- cmdp = NAND_CMD_ERASE_WITH_PARAMS;
- break;
- case NAND_CMD_READ:
- cmdp = NAND_CMD_READ_WITH_PARAMS;
- break;
- case NAND_CMD_WRITE:
- cmdp = NAND_CMD_WRITE_WITH_PARAMS;
- break;
- default:
- return -1;
- }
- cps->dev = mtd - nand->mtd;
- cps->addr_high = (u32)(addr >> 32);
- cps->addr_low = (u32)addr;
- cps->transfer_size = len;
- cps->data = (unsigned long)ptr;
- writel(cmdp, base + NAND_COMMAND);
- *rv = cps->result;
- return 0;
-}
-
-static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
- u64 addr, u32 len, void *ptr)
-{
- struct goldfish_nand *nand = mtd->priv;
- u32 rv;
- unsigned char __iomem *base = nand->base;
-
- mutex_lock(&nand->lock);
- if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
- writel(mtd - nand->mtd, base + NAND_DEV);
- writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
- writel((u32)addr, base + NAND_ADDR_LOW);
- writel(len, base + NAND_TRANSFER_SIZE);
- gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
- writel(cmd, base + NAND_COMMAND);
- rv = readl(base + NAND_RESULT);
- }
- mutex_unlock(&nand->lock);
- return rv;
-}
-
-static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- loff_t ofs = instr->addr;
- u32 len = instr->len;
- u32 rem;
-
- if (ofs + len > mtd->size)
- goto invalid_arg;
- rem = do_div(ofs, mtd->writesize);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (len % mtd->writesize)
- goto invalid_arg;
- len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);
-
- if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
- pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
- ofs, len, mtd->size, mtd->erasesize);
- return -EIO;
- }
-
- instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
-
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
- ofs, len, mtd->size, mtd->erasesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- u32 rem;
-
- if (ofs + ops->len > mtd->size)
- goto invalid_arg;
- if (ops->datbuf && ops->len && ops->len != mtd->writesize)
- goto invalid_arg;
- if (ops->ooblen + ops->ooboffs > mtd->oobsize)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->writesize);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (ops->datbuf)
- ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
- ops->len, ops->datbuf);
- ofs += mtd->writesize + ops->ooboffs;
- if (ops->oobbuf)
- ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
- ops->ooblen, ops->oobbuf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
- ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- u32 rem;
-
- if (ofs + ops->len > mtd->size)
- goto invalid_arg;
- if (ops->len && ops->len != mtd->writesize)
- goto invalid_arg;
- if (ops->ooblen + ops->ooboffs > mtd->oobsize)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->writesize);
- if (rem)
- goto invalid_arg;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (ops->datbuf)
- ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
- ops->len, ops->datbuf);
- ofs += mtd->writesize + ops->ooboffs;
- if (ops->oobbuf)
- ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
- ops->ooblen, ops->oobbuf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
- ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- u32 rem;
-
- if (from + len > mtd->size)
- goto invalid_arg;
-
- rem = do_div(from, mtd->writesize);
- if (rem)
- goto invalid_arg;
- from *= (mtd->writesize + mtd->oobsize);
-
- *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
- from, len, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- u32 rem;
-
- if (to + len > mtd->size)
- goto invalid_arg;
-
- rem = do_div(to, mtd->writesize);
- if (rem)
- goto invalid_arg;
- to *= (mtd->writesize + mtd->oobsize);
-
- *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
- to, len, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- u32 rem;
-
- if (ofs >= mtd->size)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->erasesize);
- if (rem)
- goto invalid_arg;
- ofs *= mtd->erasesize / mtd->writesize;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);
-
-invalid_arg:
- pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- ofs, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- u32 rem;
-
- if (ofs >= mtd->size)
- goto invalid_arg;
-
- rem = do_div(ofs, mtd->erasesize);
- if (rem)
- goto invalid_arg;
- ofs *= mtd->erasesize / mtd->writesize;
- ofs *= (mtd->writesize + mtd->oobsize);
-
- if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
- return -EIO;
- return 0;
-
-invalid_arg:
- pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- ofs, mtd->size, mtd->writesize);
- return -EINVAL;
-}
-
-static int nand_setup_cmd_params(struct platform_device *pdev,
- struct goldfish_nand *nand)
-{
- u64 paddr;
- unsigned char __iomem *base = nand->base;
-
- nand->cmd_params = devm_kzalloc(&pdev->dev,
- sizeof(struct cmd_params), GFP_KERNEL);
- if (!nand->cmd_params)
- return -1;
-
- paddr = __pa(nand->cmd_params);
- writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
- writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
- return 0;
-}
-
-static int goldfish_nand_init_device(struct platform_device *pdev,
- struct goldfish_nand *nand, int id)
-{
- u32 name_len;
- u32 result;
- u32 flags;
- unsigned char __iomem *base = nand->base;
- struct mtd_info *mtd = &nand->mtd[id];
- char *name;
-
- mutex_lock(&nand->lock);
- writel(id, base + NAND_DEV);
- flags = readl(base + NAND_DEV_FLAGS);
- name_len = readl(base + NAND_DEV_NAME_LEN);
- mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
- mtd->size = readl(base + NAND_DEV_SIZE_LOW);
- mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
- mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
- mtd->oobavail = mtd->oobsize;
- mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
- (mtd->writesize + mtd->oobsize) * mtd->writesize;
- do_div(mtd->size, mtd->writesize + mtd->oobsize);
- mtd->size *= mtd->writesize;
- dev_dbg(&pdev->dev,
- "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
- id, mtd->size, mtd->writesize,
- mtd->oobsize, mtd->erasesize);
- mutex_unlock(&nand->lock);
-
- mtd->priv = nand;
-
- name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
- mtd->name = name;
-
- result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
- name);
- if (result != name_len) {
- dev_err(&pdev->dev,
- "goldfish_nand_init_device failed to get dev name %d != %d\n",
- result, name_len);
- return -ENODEV;
- }
- ((char *)mtd->name)[name_len] = '\0';
-
- /* Setup the MTD structure */
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- if (flags & NAND_DEV_FLAG_READ_ONLY)
- mtd->flags &= ~MTD_WRITEABLE;
- if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
- nand_setup_cmd_params(pdev, nand);
-
- mtd->owner = THIS_MODULE;
- mtd->_erase = goldfish_nand_erase;
- mtd->_read = goldfish_nand_read;
- mtd->_write = goldfish_nand_write;
- mtd->_read_oob = goldfish_nand_read_oob;
- mtd->_write_oob = goldfish_nand_write_oob;
- mtd->_block_isbad = goldfish_nand_block_isbad;
- mtd->_block_markbad = goldfish_nand_block_markbad;
-
- if (mtd_device_register(mtd, NULL, 0))
- return -EIO;
-
- return 0;
-}
-
-static int goldfish_nand_probe(struct platform_device *pdev)
-{
- u32 num_dev;
- int i;
- int err;
- u32 num_dev_working;
- u32 version;
- struct resource *r;
- struct goldfish_nand *nand;
- unsigned char __iomem *base;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
- if (!base)
- return -ENOMEM;
-
- version = readl(base + NAND_VERSION);
- if (version != NAND_VERSION_CURRENT) {
- dev_err(&pdev->dev,
- "goldfish_nand_init: version mismatch, got %d, expected %d\n",
- version, NAND_VERSION_CURRENT);
- return -ENODEV;
- }
- num_dev = readl(base + NAND_NUM_DEV);
- if (num_dev == 0)
- return -ENODEV;
-
- nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
- sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
- if (!nand)
- return -ENOMEM;
-
- mutex_init(&nand->lock);
- nand->base = base;
- nand->mtd_count = num_dev;
- platform_set_drvdata(pdev, nand);
-
- num_dev_working = 0;
- for (i = 0; i < num_dev; i++) {
- err = goldfish_nand_init_device(pdev, nand, i);
- if (err == 0)
- num_dev_working++;
- }
- if (num_dev_working == 0)
- return -ENODEV;
- return 0;
-}
-
-static int goldfish_nand_remove(struct platform_device *pdev)
-{
- struct goldfish_nand *nand = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < nand->mtd_count; i++) {
- if (nand->mtd[i].name)
- mtd_device_unregister(&nand->mtd[i]);
- }
- return 0;
-}
-
-static struct platform_driver goldfish_nand_driver = {
- .probe = goldfish_nand_probe,
- .remove = goldfish_nand_remove,
- .driver = {
- .name = "goldfish_nand"
- }
-};
-
-module_platform_driver(goldfish_nand_driver);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h
deleted file mode 100644
index 43aeba3a4c8f..000000000000
--- a/drivers/staging/goldfish/goldfish_nand_reg.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * drivers/mtd/devices/goldfish_nand_reg.h
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef GOLDFISH_NAND_REG_H
-#define GOLDFISH_NAND_REG_H
-
-enum nand_cmd {
- /* Write device name for NAND_DEV to NAND_DATA (vaddr) */
- NAND_CMD_GET_DEV_NAME,
- NAND_CMD_READ,
- NAND_CMD_WRITE,
- NAND_CMD_ERASE,
- /* NAND_RESULT is 1 if block is bad, 0 if it is not */
- NAND_CMD_BLOCK_BAD_GET,
- NAND_CMD_BLOCK_BAD_SET,
- NAND_CMD_READ_WITH_PARAMS,
- NAND_CMD_WRITE_WITH_PARAMS,
- NAND_CMD_ERASE_WITH_PARAMS
-};
-
-enum nand_dev_flags {
- NAND_DEV_FLAG_READ_ONLY = 0x00000001,
- NAND_DEV_FLAG_CMD_PARAMS_CAP = 0x00000002,
-};
-
-#define NAND_VERSION_CURRENT (1)
-
-enum nand_reg {
- /* Global */
- NAND_VERSION = 0x000,
- NAND_NUM_DEV = 0x004,
- NAND_DEV = 0x008,
-
- /* Dev info */
- NAND_DEV_FLAGS = 0x010,
- NAND_DEV_NAME_LEN = 0x014,
- NAND_DEV_PAGE_SIZE = 0x018,
- NAND_DEV_EXTRA_SIZE = 0x01c,
- NAND_DEV_ERASE_SIZE = 0x020,
- NAND_DEV_SIZE_LOW = 0x028,
- NAND_DEV_SIZE_HIGH = 0x02c,
-
- /* Command */
- NAND_RESULT = 0x040,
- NAND_COMMAND = 0x044,
- NAND_DATA = 0x048,
- NAND_DATA_HIGH = 0x100,
- NAND_TRANSFER_SIZE = 0x04c,
- NAND_ADDR_LOW = 0x050,
- NAND_ADDR_HIGH = 0x054,
- NAND_CMD_PARAMS_ADDR_LOW = 0x058,
- NAND_CMD_PARAMS_ADDR_HIGH = 0x05c,
-};
-
-struct cmd_params {
- u32 dev;
- u32 addr_low;
- u32 addr_high;
- u32 transfer_size;
- unsigned long data;
- u32 result;
-};
-#endif
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index abc66908681d..6f032009f93f 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -124,6 +124,8 @@
#define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */
#define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */
+#define AD7192_EXT_FREQ_MHZ_MIN 2457600
+#define AD7192_EXT_FREQ_MHZ_MAX 5120000
#define AD7192_INT_FREQ_MHZ 4915200
/* NOTE:
@@ -199,6 +201,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
ARRAY_SIZE(ad7192_calib_arr));
}
+static inline bool ad7192_valid_external_frequency(u32 freq)
+{
+ return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
+ freq <= AD7192_EXT_FREQ_MHZ_MAX);
+}
+
static int ad7192_setup(struct ad7192_state *st,
const struct ad7192_platform_data *pdata)
{
@@ -224,17 +232,20 @@ static int ad7192_setup(struct ad7192_state *st,
id);
switch (pdata->clock_source_sel) {
- case AD7192_CLK_EXT_MCLK1_2:
- case AD7192_CLK_EXT_MCLK2:
- st->mclk = AD7192_INT_FREQ_MHZ;
- break;
case AD7192_CLK_INT:
case AD7192_CLK_INT_CO:
- if (pdata->ext_clk_hz)
- st->mclk = pdata->ext_clk_hz;
- else
- st->mclk = AD7192_INT_FREQ_MHZ;
+ st->mclk = AD7192_INT_FREQ_MHZ;
break;
+ case AD7192_CLK_EXT_MCLK1_2:
+ case AD7192_CLK_EXT_MCLK2:
+ if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
+ st->mclk = pdata->ext_clk_hz;
+ break;
+ }
+ dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
+ pdata->ext_clk_hz);
+ ret = -EINVAL;
+ goto out;
default:
ret = -EINVAL;
goto out;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 39f5261c9854..5cf5b7334089 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -824,7 +824,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req)
if (req->rq_pool || !req->rq_reqbuf)
return;
- kfree(req->rq_reqbuf);
+ kvfree(req->rq_reqbuf);
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index fdfeb42b2b8f..06ef26872462 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -831,7 +831,9 @@ static ssize_t message_show(struct kobject *kobj,
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
- BUG_ON(!group);
+ if (WARN_ON(!group))
+ return -EINVAL;
+
spin_lock_irqsave(&speakup_info.spinlock, flags);
retval = message_show_helper(buf, group->start, group->end);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@@ -843,7 +845,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
{
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
- BUG_ON(!group);
+ if (WARN_ON(!group))
+ return -EINVAL;
+
return message_store_helper(buf, count, group);
}
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 824d460911ec..58ccafb97344 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -1039,7 +1039,6 @@ static int synaptics_rmi4_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
/**
* synaptics_rmi4_suspend() - suspend the touch screen controller
* @dev: pointer to device structure
@@ -1047,7 +1046,7 @@ static int synaptics_rmi4_remove(struct i2c_client *client)
* This function is used to suspend the
* touch panel controller and returns integer
*/
-static int synaptics_rmi4_suspend(struct device *dev)
+static int __maybe_unused synaptics_rmi4_suspend(struct device *dev)
{
/* Touch sleep mode */
int retval;
@@ -1081,7 +1080,7 @@ static int synaptics_rmi4_suspend(struct device *dev)
* This function is used to resume the touch panel
* controller and returns integer.
*/
-static int synaptics_rmi4_resume(struct device *dev)
+static int __maybe_unused synaptics_rmi4_resume(struct device *dev)
{
int retval;
unsigned char intr_status;
@@ -1112,8 +1111,6 @@ static int synaptics_rmi4_resume(struct device *dev)
return 0;
}
-#endif
-
static SIMPLE_DEV_PM_OPS(synaptics_rmi4_dev_pm_ops, synaptics_rmi4_suspend,
synaptics_rmi4_resume);
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index c119f20dfd44..3f2ccf9d7358 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -792,7 +792,7 @@ static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
struct scsi_device *scsidev;
- unsigned char buf[36];
+ unsigned char *buf;
struct scatterlist *sg;
unsigned int i;
char *this_page;
@@ -807,6 +807,10 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
if (cmdrsp->scsi.no_disk_result == 0)
return;
+ buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
+ if (!buf)
+ return;
+
/* Linux scsi code wants a device at Lun 0
* to issue report luns, but we don't want
* a disk there so we'll present a processor
@@ -820,6 +824,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
if (scsi_sg_count(scsicmd) == 0) {
memcpy(scsi_sglist(scsicmd), buf,
cmdrsp->scsi.bufflen);
+ kfree(buf);
return;
}
@@ -831,6 +836,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
memcpy(this_page, buf + bufind, sg[i].length);
kunmap_atomic(this_page_orig);
}
+ kfree(buf);
} else {
devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
for_each_vdisk_match(vdisk, devdata, scsidev) {
diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig
index d83deb4137e8..6baba2795ce7 100644
--- a/drivers/staging/unisys/visorinput/Kconfig
+++ b/drivers/staging/unisys/visorinput/Kconfig
@@ -4,7 +4,7 @@
config UNISYS_VISORINPUT
tristate "Unisys visorinput driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && FB
+ depends on UNISYSSPAR && UNISYS_VISORBUS && FB && INPUT
---help---
If you say Y here, you will enable the Unisys visorinput driver.
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index dbbe72c7e255..f78353ddeea5 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -2179,6 +2179,8 @@ static s32 Handle_Get_InActiveTime(struct host_if_drv *hif_drv,
wid.type = WID_STR;
wid.size = ETH_ALEN;
wid.val = kmalloc(wid.size, GFP_KERNEL);
+ if (!wid.val)
+ return -ENOMEM;
stamac = wid.val;
memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 450af1b77f99..b2092c5ec7f3 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -251,6 +251,8 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ if (!skb2)
+ return -ENOMEM;
memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index be972afe6e62..bfc3e96d8d25 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -12,6 +12,7 @@
#include <linux/semaphore.h>
#include "linux_wlan_common.h"
+#include <linux/netdevice.h>
/********************************************
*
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 013a6240f193..c1ad0aea23b9 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -169,7 +169,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
hw->ident_sta_fw.variant) >
HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
if (msg->scantype.data != P80211ENUM_scantype_active)
- word = cpu_to_le16(msg->maxchanneltime.data);
+ word = msg->maxchanneltime.data;
else
word = 0;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 2e35db7f4aac..c15af2fcf2ba 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -276,12 +276,11 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
else
ret = vfs_iter_read(fd, &iter, &pos);
- kfree(bvec);
-
if (is_write) {
if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
- return (ret < 0 ? ret : -EINVAL);
+ if (ret >= 0)
+ ret = -EINVAL;
}
} else {
/*
@@ -294,17 +293,29 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
data_length);
- return (ret < 0 ? ret : -EINVAL);
+ if (ret >= 0)
+ ret = -EINVAL;
}
} else {
if (ret < 0) {
pr_err("%s() returned %d for non S_ISBLK\n",
__func__, ret);
- return ret;
+ } else if (ret != data_length) {
+ /*
+			 * Short read case:
+			 * Probably someone truncated the file under us.
+			 * We must explicitly zero sg-pages to prevent
+			 * exposing uninitialized pages to userspace.
+ */
+ if (ret < data_length)
+ ret += iov_iter_zero(data_length - ret, &iter);
+ else
+ ret = -EINVAL;
}
}
}
- return 1;
+ kfree(bvec);
+ return ret;
}
static sense_reason_t
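
For context on the fd_do_rw() change above: when a read comes back short (for example because the backing file was truncated under us), the remainder of the destination buffer has to be zeroed explicitly instead of being left with stale contents. The fragment below is a minimal user-space sketch of that idea; the helper name read_fully_or_zero() is invented for illustration, and the kernel code pads the tail with iov_iter_zero() on the remaining iov_iter rather than memset().

#include <string.h>
#include <unistd.h>

/*
 * Read up to 'len' bytes from 'fd' into 'buf'.  If the read comes back
 * short, zero the rest of the buffer so the caller never sees stale data,
 * and report the full length as transferred (mirroring how fd_do_rw()
 * pads a short read with iov_iter_zero()).
 */
static ssize_t read_fully_or_zero(int fd, void *buf, size_t len)
{
	ssize_t ret = read(fd, buf, len);

	if (ret < 0)
		return ret;			/* real I/O error, propagate it */

	if ((size_t)ret < len)
		memset((char *)buf + ret, 0, len - ret);

	return (ssize_t)len;
}
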
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index a7d30e894cab..c43c942e1f87 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -900,7 +900,7 @@ static int tcmu_configure_device(struct se_device *dev)
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
- info->mem[0].addr = (phys_addr_t) udev->mb_addr;
+ info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
info->mem[0].size = TCMU_RING_SIZE;
info->mem[0].memtype = UIO_MEM_VIRTUAL;
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 92fe5789bcce..220cf4298f0d 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -3,3 +3,4 @@ optee-objs += core.o
optee-objs += call.o
optee-objs += rpc.o
optee-objs += supp.o
+optee-objs += shm_pool.o
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index f7b7b404c990..a5afbe6dee68 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
@@ -135,6 +136,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_call_waiter w;
struct optee_rpc_param param = { };
+ struct optee_call_ctx call_ctx = { };
u32 ret;
param.a0 = OPTEE_SMC_CALL_WITH_ARG;
@@ -159,13 +161,14 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
param.a1 = res.a1;
param.a2 = res.a2;
param.a3 = res.a3;
- optee_handle_rpc(ctx, &param);
+ optee_handle_rpc(ctx, &param, &call_ctx);
} else {
ret = res.a0;
break;
}
}
+ optee_rpc_finalize_call(&call_ctx);
/*
* We're done with our thread in secure world, if there's any
* thread waiters wake up one.
@@ -442,3 +445,218 @@ void optee_disable_shm_cache(struct optee *optee)
}
optee_cq_wait_final(&optee->call_queue, &w);
}
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold list of user page addresses and
+ * links to the next pages of buffer
+ */
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset)
+{
+ int n = 0;
+ phys_addr_t optee_page;
+ /*
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+ * for details.
+ */
+ struct {
+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+ u64 next_page_data;
+ } *pages_data;
+
+ /*
+	 * Currently OP-TEE uses 4k page size and it does not look
+	 * like this will change in the future. On the other hand, there
+	 * are no known ARM architectures with page size < 4k.
+	 * Thus the build assert below looks redundant. But the following
+	 * code heavily relies on this assumption, so it is better to be
+	 * safe than sorry.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ pages_data = (void *)dst;
+ /*
+	 * If the Linux page is bigger than 4k, and the user buffer offset is
+	 * larger than 4k/8k/12k/etc., this will skip the first 4k pages,
+	 * because they bear no data of value for OP-TEE.
+ */
+ optee_page = page_to_phys(*pages) +
+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ while (true) {
+ pages_data->pages_list[n++] = optee_page;
+
+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
+ pages_data->next_page_data =
+ virt_to_phys(pages_data + 1);
+ pages_data++;
+ n = 0;
+ }
+
+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+ if (!(optee_page & ~PAGE_MASK)) {
+ if (!--num_pages)
+ break;
+ pages++;
+ optee_page = page_to_phys(*pages);
+ }
+ }
+}
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+u64 *optee_allocate_pages_list(size_t num_entries)
+{
+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+void optee_free_pages_list(void *list, size_t num_entries)
+{
+ free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+static bool is_normal_memory(pgprot_t p)
+{
+#if defined(CONFIG_ARM)
+ return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
+#elif defined(CONFIG_ARM64)
+ return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+#else
+#error "Unsupported architecture"
+#endif
+}
+
+static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+{
+ while (vma && is_normal_memory(vma->vm_page_prot)) {
+ if (vma->vm_end >= end)
+ return 0;
+ vma = vma->vm_next;
+ }
+
+ return -EINVAL;
+}
+
+static int check_mem_type(unsigned long start, size_t num_pages)
+{
+ struct mm_struct *mm = current->mm;
+ int rc;
+
+ down_read(&mm->mmap_sem);
+ rc = __check_mem_type(find_vma(mm, start),
+ start + num_pages * PAGE_SIZE);
+ up_read(&mm->mmap_sem);
+
+ return rc;
+}
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct tee_shm *shm_arg = NULL;
+ struct optee_msg_arg *msg_arg;
+ u64 *pages_list;
+ phys_addr_t msg_parg;
+ int rc;
+
+ if (!num_pages)
+ return -EINVAL;
+
+ rc = check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ pages_list = optee_allocate_pages_list(num_pages);
+ if (!pages_list)
+ return -ENOMEM;
+
+ shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+ if (IS_ERR(shm_arg)) {
+ rc = PTR_ERR(shm_arg);
+ goto out;
+ }
+
+ optee_fill_pages_list(pages_list, pages, num_pages,
+ tee_shm_get_page_offset(shm));
+
+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+ /*
+	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr we
+	 * store the buffer offset from the 4k page, as described in the OP-TEE ABI.
+ */
+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+ if (optee_do_call_with_arg(ctx, msg_parg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+ tee_shm_free(shm_arg);
+out:
+ optee_free_pages_list(pages_list, num_pages);
+ return rc;
+}
+
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct tee_shm *shm_arg;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ int rc = 0;
+
+ shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+ if (IS_ERR(shm_arg))
+ return PTR_ERR(shm_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+ if (optee_do_call_with_arg(ctx, msg_parg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+ tee_shm_free(shm_arg);
+ return rc;
+}
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ /*
+ * We don't want to register supplicant memory in OP-TEE.
+ * Instead information about it will be passed in RPC code.
+ */
+ return check_mem_type(start, num_pages);
+}
+
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
+{
+ return 0;
+}
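
To make the page-list layout built by optee_fill_pages_list() above concrete: each 4 KiB page of the list holds 511 page addresses plus one link to the next list page. The following is a stand-alone user-space model of that layout; the names model_pagelist_page, model_list_size() and model_fill_pages_list() are illustrative only and do not exist in the driver, and the chain link here is a virtual pointer where the driver stores a physical address.

#include <stddef.h>
#include <stdint.h>

#define MSG_PAGE_SIZE		4096u
#define ENTRIES_PER_PAGE	((MSG_PAGE_SIZE / sizeof(uint64_t)) - 1)

/* Exactly one 4 KiB page: 511 entries plus the link to the next page. */
struct model_pagelist_page {
	uint64_t pages[ENTRIES_PER_PAGE];	/* 4 KiB-aligned page addresses */
	uint64_t next;				/* address of the next list page */
};

/* Bytes of list storage needed for num_entries page addresses. */
static size_t model_list_size(size_t num_entries)
{
	size_t pages = (num_entries + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	return pages * MSG_PAGE_SIZE;
}

/* Copy the addresses in src[] into dst[], chaining list pages together. */
static void model_fill_pages_list(struct model_pagelist_page *dst,
				  const uint64_t *src, size_t num_entries)
{
	size_t i;
	size_t n = 0;

	for (i = 0; i < num_entries; i++) {
		dst->pages[n++] = src[i];
		if (n == ENTRIES_PER_PAGE && i + 1 < num_entries) {
			/* Last slot of this page links to the next one. */
			dst->next = (uint64_t)(uintptr_t)(dst + 1);
			dst++;
			n = 0;
		}
	}
}
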
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index edb6e4e9ef3a..e9843c53fe31 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
+#include "shm_pool.h"
#define DRIVER_NAME "optee"
@@ -97,6 +98,25 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
return rc;
}
break;
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ p->u.memref.size = mp->u.rmem.size;
+ shm = (struct tee_shm *)(unsigned long)
+ mp->u.rmem.shm_ref;
+
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ break;
+ }
+ p->u.memref.shm_offs = mp->u.rmem.offs;
+ p->u.memref.shm = shm;
+
+ break;
+
default:
return -EINVAL;
}
@@ -104,6 +124,46 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
return 0;
}
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ int rc;
+ phys_addr_t pa;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+ return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.rmem.size = p->u.memref.size;
+ mp->u.rmem.offs = p->u.memref.shm_offs;
+ return 0;
+}
+
/**
* optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
* @msg_params: OPTEE_MSG parameters
@@ -116,7 +176,6 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
{
int rc;
size_t n;
- phys_addr_t pa;
for (n = 0; n < num_params; n++) {
const struct tee_param *p = params + n;
@@ -139,22 +198,12 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
- p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
- mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
- mp->u.tmem.size = p->u.memref.size;
- if (!p->u.memref.shm) {
- mp->u.tmem.buf_ptr = 0;
- break;
- }
- rc = tee_shm_get_pa(p->u.memref.shm,
- p->u.memref.shm_offs, &pa);
+ if (tee_shm_is_registered(p->u.memref.shm))
+ rc = to_msg_param_reg_mem(mp, p);
+ else
+ rc = to_msg_param_tmp_mem(mp, p);
if (rc)
return rc;
- mp->u.tmem.buf_ptr = pa;
- mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
- OPTEE_MSG_ATTR_CACHE_SHIFT;
break;
default:
return -EINVAL;
@@ -171,6 +220,10 @@ static void optee_get_version(struct tee_device *teedev,
.impl_caps = TEE_OPTEE_CAP_TZ,
.gen_caps = TEE_GEN_CAP_GP,
};
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
*vers = v;
}
@@ -187,12 +240,12 @@ static int optee_open(struct tee_context *ctx)
if (teedev == optee->supp_teedev) {
bool busy = true;
- mutex_lock(&optee->supp.ctx_mutex);
+ mutex_lock(&optee->supp.mutex);
if (!optee->supp.ctx) {
busy = false;
optee->supp.ctx = ctx;
}
- mutex_unlock(&optee->supp.ctx_mutex);
+ mutex_unlock(&optee->supp.mutex);
if (busy) {
kfree(ctxdata);
return -EBUSY;
@@ -252,11 +305,8 @@ static void optee_release(struct tee_context *ctx)
ctx->data = NULL;
- if (teedev == optee->supp_teedev) {
- mutex_lock(&optee->supp.ctx_mutex);
- optee->supp.ctx = NULL;
- mutex_unlock(&optee->supp.ctx_mutex);
- }
+ if (teedev == optee->supp_teedev)
+ optee_supp_release(&optee->supp);
}
static const struct tee_driver_ops optee_ops = {
@@ -267,6 +317,8 @@ static const struct tee_driver_ops optee_ops = {
.close_session = optee_close_session,
.invoke_func = optee_invoke_func,
.cancel_req = optee_cancel_req,
+ .shm_register = optee_shm_register,
+ .shm_unregister = optee_shm_unregister,
};
static const struct tee_desc optee_desc = {
@@ -281,6 +333,8 @@ static const struct tee_driver_ops optee_supp_ops = {
.release = optee_release,
.supp_recv = optee_supp_recv,
.supp_send = optee_supp_send,
+ .shm_register = optee_shm_register_supp,
+ .shm_unregister = optee_shm_unregister_supp,
};
static const struct tee_desc optee_supp_desc = {
@@ -345,21 +399,22 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
}
static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
+ u32 sec_caps)
{
union {
struct arm_smccc_res smccc;
struct optee_smc_get_shm_config_result result;
} res;
- struct tee_shm_pool *pool;
unsigned long vaddr;
phys_addr_t paddr;
size_t size;
phys_addr_t begin;
phys_addr_t end;
void *va;
- struct tee_shm_pool_mem_info priv_info;
- struct tee_shm_pool_mem_info dmabuf_info;
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -389,22 +444,49 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
}
vaddr = (unsigned long)va;
- priv_info.vaddr = vaddr;
- priv_info.paddr = paddr;
- priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
- dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
- dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
- dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
- pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
- if (IS_ERR(pool)) {
- memunmap(va);
- goto out;
+ /*
+	 * If OP-TEE can work with unregistered SHM, we will use our own
+	 * pool for private shm
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+ } else {
+ const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+ 3 /* 8 bytes aligned */);
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+
+ vaddr += sz;
+ paddr += sz;
+ size -= sz;
}
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+ if (IS_ERR(rc))
+ goto err_free_priv_mgr;
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc))
+ goto err_free_dmabuf_mgr;
+
*memremaped_shm = va;
-out:
- return pool;
+
+ return rc;
+
+err_free_dmabuf_mgr:
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+ tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+ memunmap(va);
+ return rc;
}
/* Simple wrapper functions to be able to use a function pointer */
@@ -482,7 +564,7 @@ static struct optee *optee_probe(struct device_node *np)
if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
return ERR_PTR(-EINVAL);
- pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
if (IS_ERR(pool))
return (void *)pool;
@@ -493,6 +575,7 @@ static struct optee *optee_probe(struct device_node *np)
}
optee->invoke_fn = invoke_fn;
+ optee->sec_caps = sec_caps;
teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index dd7a06ee0462..30504901be80 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -67,11 +67,32 @@
#define OPTEE_MSG_ATTR_META BIT(8)
/*
- * The temporary shared memory object is not physically contigous and this
- * temp memref is followed by another fragment until the last temp memref
- * that doesn't have this bit set.
+ * Pointer to a list of pages used to register user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. The buffer will
+ * contain a list of page addresses from which OP-TEE core can reconstruct
+ * the contiguous buffer. Page addresses are stored as 64 bit values.
+ * The last entry on a page should point to the next page of the buffer.
+ * Every entry in the buffer should point to the beginning of a 4k page
+ * (the 12 least significant bits must be zero).
+ *
+ * The 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should
+ * hold the page offset of the user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ * uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ * uint64_t next_page_data;
+ * };
+ *
+ * Structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If REE uses larger pages, it should divide them into 4KB ones.
*/
-#define OPTEE_MSG_ATTR_FRAGMENT BIT(9)
+#define OPTEE_MSG_ATTR_NONCONTIG BIT(9)
/*
* Memory attributes for caching passed with temp memrefs. The actual value
@@ -94,6 +115,11 @@
#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096
+
/**
* struct optee_msg_param_tmem - temporary memory reference parameter
* @buf_ptr: Address of the buffer
@@ -145,8 +171,8 @@ struct optee_msg_param_value {
*
* @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
* the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
- * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
* OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
*/
struct optee_msg_param {
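
To illustrate the buf_ptr encoding described in the comment above: the upper bits carry the (4 KiB aligned) address of the page list, and the low 12 bits carry the user buffer's offset into its first 4 KiB page. The helpers below are a hypothetical user-space sketch of that packing and are not part of the ABI header.

#include <stdint.h>

#define NONCONTIG_PAGE_SIZE	4096u
#define NONCONTIG_OFFS_MASK	(NONCONTIG_PAGE_SIZE - 1)

/* Combine a page-aligned page-list address with the buffer's page offset. */
static uint64_t pack_buf_ptr(uint64_t pages_list_pa, uint64_t buf_offset)
{
	return (pages_list_pa & ~(uint64_t)NONCONTIG_OFFS_MASK) |
	       (buf_offset & NONCONTIG_OFFS_MASK);
}

/* Recover the page-list address from a packed buf_ptr. */
static uint64_t buf_ptr_to_list(uint64_t buf_ptr)
{
	return buf_ptr & ~(uint64_t)NONCONTIG_OFFS_MASK;
}

/* Recover the user buffer's offset within its first 4 KiB page. */
static uint64_t buf_ptr_to_offset(uint64_t buf_ptr)
{
	return buf_ptr & NONCONTIG_OFFS_MASK;
}
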
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index c374cd594314..35e79386c556 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -53,36 +53,24 @@ struct optee_wait_queue {
* @ctx the context of current connected supplicant.
* if !NULL the supplicant device is available for use,
* else busy
- * @ctx_mutex: held while accessing @ctx
- * @func: supplicant function id to call
- * @ret: call return value
- * @num_params: number of elements in @param
- * @param: parameters for @func
- * @req_posted: if true, a request has been posted to the supplicant
- * @supp_next_send: if true, next step is for supplicant to send response
- * @thrd_mutex: held by the thread doing a request to supplicant
- * @supp_mutex: held by supplicant while operating on this struct
- * @data_to_supp: supplicant is waiting on this for next request
- * @data_from_supp: requesting thread is waiting on this to get the result
+ * @mutex: held while accessing content of this struct
+ * @req_id: current request id if supplicant is doing synchronous
+ * communication, else -1
+ * @reqs:		queued requests not yet retrieved by supplicant
+ * @idr: IDR holding all requests currently being processed
+ * by supplicant
+ * @reqs_c: completion used by supplicant when waiting for a
+ * request to be queued.
*/
struct optee_supp {
+ /* Serializes access to this struct */
+ struct mutex mutex;
struct tee_context *ctx;
- /* Serializes access of ctx */
- struct mutex ctx_mutex;
-
- u32 func;
- u32 ret;
- size_t num_params;
- struct tee_param *param;
-
- bool req_posted;
- bool supp_next_send;
- /* Serializes access to this struct for requesting thread */
- struct mutex thrd_mutex;
- /* Serializes access to this struct for supplicant threads */
- struct mutex supp_mutex;
- struct completion data_to_supp;
- struct completion data_from_supp;
+
+ int req_id;
+ struct list_head reqs;
+ struct idr idr;
+ struct completion reqs_c;
};
/**
@@ -96,6 +84,8 @@ struct optee_supp {
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
* @memremaped_shm virtual address of memory in shared memory pool
+ * @sec_caps: secure world capabilities defined by
+ * OPTEE_SMC_SEC_CAP_* in optee_smc.h
*/
struct optee {
struct tee_device *supp_teedev;
@@ -106,6 +96,7 @@ struct optee {
struct optee_supp supp;
struct tee_shm_pool *pool;
void *memremaped_shm;
+ u32 sec_caps;
};
struct optee_session {
@@ -130,7 +121,16 @@ struct optee_rpc_param {
u32 a7;
};
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+ /* information about pages list used in last allocation */
+ void *pages_list;
+ size_t num_entries;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx);
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
void optee_wait_queue_init(struct optee_wait_queue *wq);
void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -142,6 +142,7 @@ int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
+void optee_supp_release(struct optee_supp *supp);
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param);
@@ -160,11 +161,26 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
void optee_enable_shm_cache(struct optee *optee);
void optee_disable_shm_cache(struct optee *optee);
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start);
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start);
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
+
int optee_from_msg_param(struct tee_param *params, size_t num_params,
const struct optee_msg_param *msg_params);
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
const struct tee_param *params);
+u64 *optee_allocate_pages_list(size_t num_entries);
+void optee_free_pages_list(void *array, size_t num_entries);
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset);
+
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 069c8e1429de..7cd327243ada 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -222,6 +222,13 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory",
+ * secure world accepts command buffers located in any part of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2)
+
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index cef417f4f4d2..41aea12e2bcc 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -192,15 +192,16 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
if (ret)
return ERR_PTR(-ENOMEM);
- mutex_lock(&optee->supp.ctx_mutex);
+ mutex_lock(&optee->supp.mutex);
/* Increases count as secure world doesn't have a reference */
shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
- mutex_unlock(&optee->supp.ctx_mutex);
+ mutex_unlock(&optee->supp.mutex);
return shm;
}
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
- struct optee_msg_arg *arg)
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
{
phys_addr_t pa;
struct tee_shm *shm;
@@ -245,10 +246,49 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
goto bad;
}
- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
- arg->params[0].u.tmem.buf_ptr = pa;
- arg->params[0].u.tmem.size = sz;
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ sz = tee_shm_get_size(shm);
+
+ if (tee_shm_is_registered(shm)) {
+ struct page **pages;
+ u64 *pages_list;
+ size_t page_num;
+
+ pages = tee_shm_get_pages(shm, &page_num);
+ if (!pages || !page_num) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ pages_list = optee_allocate_pages_list(page_num);
+ if (!pages_list) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ call_ctx->pages_list = pages_list;
+ call_ctx->num_entries = page_num;
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ /*
+		 * In the least significant bits of u.tmem.buf_ptr we store the
+		 * buffer offset from the 4k page, as described in the OP-TEE ABI.
+ */
+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) &
+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+ optee_fill_pages_list(pages_list, pages, page_num,
+ tee_shm_get_page_offset(shm));
+ } else {
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ }
+
arg->ret = TEEC_SUCCESS;
return;
bad:
@@ -307,8 +347,24 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
arg->ret = TEEC_SUCCESS;
}
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+ if (call_ctx->pages_list) {
+ optee_free_pages_list(call_ctx->pages_list,
+ call_ctx->num_entries);
+ call_ctx->pages_list = NULL;
+ call_ctx->num_entries = 0;
+ }
+}
+
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+ free_pages_list(call_ctx);
+}
+
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
- struct tee_shm *shm)
+ struct tee_shm *shm,
+ struct optee_call_ctx *call_ctx)
{
struct optee_msg_arg *arg;
@@ -329,7 +385,8 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
handle_rpc_func_cmd_wait(arg);
break;
case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
- handle_rpc_func_cmd_shm_alloc(ctx, arg);
+ free_pages_list(call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
break;
case OPTEE_MSG_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -343,10 +400,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
* optee_handle_rpc() - handle RPC from secure world
* @ctx: context doing the RPC
* @param: value of registers for the RPC
+ * @call_ctx: call context. Preserved during one OP-TEE invocation
*
* Result of RPC is written back into @param.
*/
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx)
{
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
@@ -381,7 +440,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
break;
case OPTEE_SMC_RPC_FUNC_CMD:
shm = reg_pair_to_ptr(param->a1, param->a2);
- handle_rpc_func_cmd(ctx, optee, shm);
+ handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
break;
default:
pr_warn("Unknown RPC func 0x%x\n",
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
new file mode 100644
index 000000000000..49397813fff1
--- /dev/null
+++ b/drivers/tee/optee/shm_pool.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2017, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "shm_pool.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ unsigned int order = get_order(size);
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
+
+ shm->kaddr = page_address(page);
+ shm->paddr = page_to_phys(page);
+ shm->size = PAGE_SIZE << order;
+
+ return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from the kernel's own memory.
+ */
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
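
Note on the allocator above: pool_op_alloc() rounds every request up to a whole power-of-two number of pages, so shm->size ends up as PAGE_SIZE << order rather than the requested size. Below is a small user-space sketch of that rounding, with model_get_order() standing in for the kernel's get_order(); the model_* names are illustrative only.

#include <stddef.h>

#define MODEL_PAGE_SIZE	4096u

/* Smallest 'order' such that (MODEL_PAGE_SIZE << order) >= size. */
static unsigned int model_get_order(size_t size)
{
	unsigned int order = 0;
	size_t chunk = MODEL_PAGE_SIZE;

	while (chunk < size) {
		chunk <<= 1;
		order++;
	}
	return order;
}

/* Bytes actually handed out for a request of 'size' bytes. */
static size_t model_alloc_size(size_t size)
{
	return (size_t)MODEL_PAGE_SIZE << model_get_order(size);
}
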
diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h
new file mode 100644
index 000000000000..4e753c3bf7ec
--- /dev/null
+++ b/drivers/tee/optee/shm_pool.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SHM_POOL_H
+#define SHM_POOL_H
+
+#include <linux/tee_drv.h>
+
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
+
+#endif
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index b4ea0678a436..df35fc01fd3e 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -16,21 +16,61 @@
#include <linux/uaccess.h>
#include "optee_private.h"
+struct optee_supp_req {
+ struct list_head link;
+
+ bool busy;
+ u32 func;
+ u32 ret;
+ size_t num_params;
+ struct tee_param *param;
+
+ struct completion c;
+};
+
void optee_supp_init(struct optee_supp *supp)
{
memset(supp, 0, sizeof(*supp));
- mutex_init(&supp->ctx_mutex);
- mutex_init(&supp->thrd_mutex);
- mutex_init(&supp->supp_mutex);
- init_completion(&supp->data_to_supp);
- init_completion(&supp->data_from_supp);
+ mutex_init(&supp->mutex);
+ init_completion(&supp->reqs_c);
+ idr_init(&supp->idr);
+ INIT_LIST_HEAD(&supp->reqs);
+ supp->req_id = -1;
}
void optee_supp_uninit(struct optee_supp *supp)
{
- mutex_destroy(&supp->ctx_mutex);
- mutex_destroy(&supp->thrd_mutex);
- mutex_destroy(&supp->supp_mutex);
+ mutex_destroy(&supp->mutex);
+ idr_destroy(&supp->idr);
+}
+
+void optee_supp_release(struct optee_supp *supp)
+{
+ int id;
+ struct optee_supp_req *req;
+ struct optee_supp_req *req_tmp;
+
+ mutex_lock(&supp->mutex);
+
+	/* Abort all requests retrieved by the supplicant */
+ idr_for_each_entry(&supp->idr, req, id) {
+ req->busy = false;
+ idr_remove(&supp->idr, id);
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ /* Abort all queued requests */
+ list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
+ list_del(&req->link);
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+
+ supp->ctx = NULL;
+ supp->req_id = -1;
+
+ mutex_unlock(&supp->mutex);
}
/**
@@ -44,53 +84,42 @@ void optee_supp_uninit(struct optee_supp *supp)
*/
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param)
+
{
- bool interruptable;
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
+ bool interruptable;
u32 ret;
- /*
- * Other threads blocks here until we've copied our answer from
- * supplicant.
- */
- while (mutex_lock_interruptible(&supp->thrd_mutex)) {
- /* See comment below on when the RPC can be interrupted. */
- mutex_lock(&supp->ctx_mutex);
- interruptable = !supp->ctx;
- mutex_unlock(&supp->ctx_mutex);
- if (interruptable)
- return TEEC_ERROR_COMMUNICATION;
- }
+ if (!req)
+ return TEEC_ERROR_OUT_OF_MEMORY;
- /*
- * We have exclusive access now since the supplicant at this
- * point is either doing a
- * wait_for_completion_interruptible(&supp->data_to_supp) or is in
- * userspace still about to do the ioctl() to enter
- * optee_supp_recv() below.
- */
+ init_completion(&req->c);
+ req->func = func;
+ req->num_params = num_params;
+ req->param = param;
- supp->func = func;
- supp->num_params = num_params;
- supp->param = param;
- supp->req_posted = true;
+ /* Insert the request in the request list */
+ mutex_lock(&supp->mutex);
+ list_add_tail(&req->link, &supp->reqs);
+ mutex_unlock(&supp->mutex);
- /* Let supplicant get the data */
- complete(&supp->data_to_supp);
+	/* Tell any waiter there's a new request */
+ complete(&supp->reqs_c);
/*
* Wait for supplicant to process and return result, once we've
- * returned from wait_for_completion(data_from_supp) we have
+ * returned from wait_for_completion(&req->c) successfully we have
* exclusive access again.
*/
- while (wait_for_completion_interruptible(&supp->data_from_supp)) {
- mutex_lock(&supp->ctx_mutex);
+ while (wait_for_completion_interruptible(&req->c)) {
+ mutex_lock(&supp->mutex);
interruptable = !supp->ctx;
if (interruptable) {
/*
* There's no supplicant available and since the
- * supp->ctx_mutex currently is held none can
+ * supp->mutex currently is held none can
* become available until the mutex released
* again.
*
@@ -101,24 +130,91 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
* will serve all requests in a timely manner and
* interrupting then wouldn't make sense.
*/
- supp->ret = TEEC_ERROR_COMMUNICATION;
- init_completion(&supp->data_to_supp);
+ interruptable = !req->busy;
+ if (!req->busy)
+ list_del(&req->link);
}
- mutex_unlock(&supp->ctx_mutex);
- if (interruptable)
+ mutex_unlock(&supp->mutex);
+
+ if (interruptable) {
+ req->ret = TEEC_ERROR_COMMUNICATION;
break;
+ }
}
- ret = supp->ret;
- supp->param = NULL;
- supp->req_posted = false;
-
- /* We're done, let someone else talk to the supplicant now. */
- mutex_unlock(&supp->thrd_mutex);
+ ret = req->ret;
+ kfree(req);
return ret;
}
+static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
+ int num_params, int *id)
+{
+ struct optee_supp_req *req;
+
+ if (supp->req_id != -1) {
+ /*
+		 * Supplicant should not mix synchronous and asynchronous
+ * requests.
+ */
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (list_empty(&supp->reqs))
+ return NULL;
+
+ req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
+
+ if (num_params < req->num_params) {
+ /* Not enough room for parameters */
+ return ERR_PTR(-EINVAL);
+ }
+
+ *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
+ if (*id < 0)
+ return ERR_PTR(-ENOMEM);
+
+ list_del(&req->link);
+ req->busy = true;
+
+ return req;
+}
+
+static int supp_check_recv_params(size_t num_params, struct tee_param *params,
+ size_t *num_meta)
+{
+ size_t n;
+
+ if (!num_params)
+ return -EINVAL;
+
+ /*
+	 * If there are memrefs we need to decrease their refcounts, as they
+	 * were increased earlier, and we'll even refuse to accept any below.
+ */
+ for (n = 0; n < num_params; n++)
+ if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+
+ /*
+ * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
+ * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
+ */
+ for (n = 0; n < num_params; n++)
+ if (params[n].attr &&
+ params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
+ return -EINVAL;
+
+ /* At most we'll need one meta parameter so no need to check for more */
+ if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
+ *num_meta = 1;
+ else
+ *num_meta = 0;
+
+ return 0;
+}
+
/**
* optee_supp_recv() - receive request for supplicant
* @ctx: context receiving the request
@@ -135,65 +231,99 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req = NULL;
+ int id;
+ size_t num_meta;
int rc;
- /*
- * In case two threads in one supplicant is calling this function
- * simultaneously we need to protect the data with a mutex which
- * we'll release before returning.
- */
- mutex_lock(&supp->supp_mutex);
+ rc = supp_check_recv_params(*num_params, param, &num_meta);
+ if (rc)
+ return rc;
+
+ while (true) {
+ mutex_lock(&supp->mutex);
+ req = supp_pop_entry(supp, *num_params - num_meta, &id);
+ mutex_unlock(&supp->mutex);
+
+ if (req) {
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ break;
+ }
- if (supp->supp_next_send) {
/*
- * optee_supp_recv() has been called again without
- * a optee_supp_send() in between. Supplicant has
- * probably been restarted before it was able to
- * write back last result. Abort last request and
- * wait for a new.
+ * If we didn't get a request we'll block in
+ * wait_for_completion() to avoid needless spinning.
+ *
+ * This is where supplicant will be hanging most of
+		 * the time, let's make this interruptible so we
+ * can easily restart supplicant if needed.
*/
- if (supp->req_posted) {
- supp->ret = TEEC_ERROR_COMMUNICATION;
- supp->supp_next_send = false;
- complete(&supp->data_from_supp);
- }
+ if (wait_for_completion_interruptible(&supp->reqs_c))
+ return -ERESTARTSYS;
}
- /*
- * This is where supplicant will be hanging most of the
- * time, let's make this interruptable so we can easily
- * restart supplicant if needed.
- */
- if (wait_for_completion_interruptible(&supp->data_to_supp)) {
- rc = -ERESTARTSYS;
- goto out;
+ if (num_meta) {
+ /*
+		 * tee-supplicant supports meta parameters -> requests can be
+ * processed asynchronously.
+ */
+ param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+ param->u.value.a = id;
+ param->u.value.b = 0;
+ param->u.value.c = 0;
+ } else {
+ mutex_lock(&supp->mutex);
+ supp->req_id = id;
+ mutex_unlock(&supp->mutex);
}
- /* We have exlusive access to the data */
+ *func = req->func;
+ *num_params = req->num_params + num_meta;
+ memcpy(param + num_meta, req->param,
+ sizeof(struct tee_param) * req->num_params);
- if (*num_params < supp->num_params) {
- /*
- * Not enough room for parameters, tell supplicant
- * it failed and abort last request.
- */
- supp->ret = TEEC_ERROR_COMMUNICATION;
- rc = -EINVAL;
- complete(&supp->data_from_supp);
- goto out;
+ return 0;
+}
+
+static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
+ size_t num_params,
+ struct tee_param *param,
+ size_t *num_meta)
+{
+ struct optee_supp_req *req;
+ int id;
+ size_t nm;
+ const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ TEE_IOCTL_PARAM_ATTR_META;
+
+ if (!num_params)
+ return ERR_PTR(-EINVAL);
+
+ if (supp->req_id == -1) {
+ if (param->attr != attr)
+ return ERR_PTR(-EINVAL);
+ id = param->u.value.a;
+ nm = 1;
+ } else {
+ id = supp->req_id;
+ nm = 0;
}
- *func = supp->func;
- *num_params = supp->num_params;
- memcpy(param, supp->param,
- sizeof(struct tee_param) * supp->num_params);
+ req = idr_find(&supp->idr, id);
+ if (!req)
+ return ERR_PTR(-ENOENT);
- /* Allow optee_supp_send() below to do its work */
- supp->supp_next_send = true;
+ if ((num_params - nm) != req->num_params)
+ return ERR_PTR(-EINVAL);
- rc = 0;
-out:
- mutex_unlock(&supp->supp_mutex);
- return rc;
+ req->busy = false;
+ idr_remove(&supp->idr, id);
+ supp->req_id = -1;
+ *num_meta = nm;
+
+ return req;
}
/**
@@ -211,63 +341,42 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev);
struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
size_t n;
- int rc = 0;
+ size_t num_meta;
- /*
- * We still have exclusive access to the data since that's how we
- * left it when returning from optee_supp_read().
- */
-
- /* See comment on mutex in optee_supp_read() above */
- mutex_lock(&supp->supp_mutex);
-
- if (!supp->supp_next_send) {
- /*
- * Something strange is going on, supplicant shouldn't
- * enter optee_supp_send() in this state
- */
- rc = -ENOENT;
- goto out;
- }
+ mutex_lock(&supp->mutex);
+ req = supp_pop_req(supp, num_params, param, &num_meta);
+ mutex_unlock(&supp->mutex);
- if (num_params != supp->num_params) {
- /*
- * Something is wrong, let supplicant restart. Next call to
- * optee_supp_recv() will give an error to the requesting
- * thread and release it.
- */
- rc = -EINVAL;
- goto out;
+ if (IS_ERR(req)) {
+ /* Something is wrong, let supplicant restart. */
+ return PTR_ERR(req);
}
/* Update out and in/out parameters */
- for (n = 0; n < num_params; n++) {
- struct tee_param *p = supp->param + n;
+ for (n = 0; n < req->num_params; n++) {
+ struct tee_param *p = req->param + n;
- switch (p->attr) {
+ switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
- p->u.value.a = param[n].u.value.a;
- p->u.value.b = param[n].u.value.b;
- p->u.value.c = param[n].u.value.c;
+ p->u.value.a = param[n + num_meta].u.value.a;
+ p->u.value.b = param[n + num_meta].u.value.b;
+ p->u.value.c = param[n + num_meta].u.value.c;
break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- p->u.memref.size = param[n].u.memref.size;
+ p->u.memref.size = param[n + num_meta].u.memref.size;
break;
default:
break;
}
}
- supp->ret = ret;
-
- /* Allow optee_supp_recv() above to do its work */
- supp->supp_next_send = false;
+ req->ret = ret;
/* Let the requesting thread continue */
- complete(&supp->data_from_supp);
-out:
- mutex_unlock(&supp->supp_mutex);
- return rc;
+ complete(&req->c);
+
+ return 0;
}
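
The reworked supplicant path above replaces the single in-flight request with a queue: optee_supp_thrd_req() appends a request and sleeps on a per-request completion, the supplicant pops the oldest entry into an IDR while it works on it, and optee_supp_send() posts the result and completes the request. The stand-alone sketch below models those state transitions in user space; the model_* names, the singly linked list (in place of list_head), the fixed-size inflight[] table (in place of the IDR) and the done flag (in place of the completion) are all simplifications for illustration.

#include <stdbool.h>
#include <stddef.h>

#define MAX_INFLIGHT	16

/* One queued request, the user-space analogue of struct optee_supp_req. */
struct model_req {
	struct model_req *next;		/* link in the pending queue */
	bool busy;			/* popped by supplicant, not yet answered */
	unsigned int ret;		/* filled in by model_send() */
	bool done;			/* stands in for the per-request completion */
};

struct model_supp {
	struct model_req *reqs;			  /* FIFO of queued requests */
	struct model_req *inflight[MAX_INFLIGHT]; /* stands in for the IDR */
};

/* Requesting side: queue a request (optee_supp_thrd_req() before sleeping). */
static void model_queue_req(struct model_supp *s, struct model_req *req)
{
	struct model_req **pp = &s->reqs;

	req->next = NULL;
	while (*pp)
		pp = &(*pp)->next;
	*pp = req;
}

/* Supplicant side: pop the oldest request and give it an id (supp_pop_entry()). */
static int model_recv(struct model_supp *s, struct model_req **out)
{
	struct model_req *req = s->reqs;
	int id;

	if (!req)
		return -1;	/* nothing queued, the supplicant would block */

	for (id = 0; id < MAX_INFLIGHT; id++) {
		if (!s->inflight[id]) {
			s->reqs = req->next;
			req->busy = true;
			s->inflight[id] = req;
			*out = req;
			return id;
		}
	}
	return -1;		/* table full */
}

/* Supplicant side: post the result and wake the requester (optee_supp_send()). */
static void model_send(struct model_supp *s, int id, unsigned int ret)
{
	struct model_req *req = s->inflight[id];

	s->inflight[id] = NULL;
	req->busy = false;
	req->ret = ret;
	req->done = true;	/* the kernel does complete(&req->c) here */
}
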
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 58a5009eacc3..6c4b200a4560 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -54,6 +54,7 @@ static int tee_open(struct inode *inode, struct file *filp)
goto err;
}
+ kref_init(&ctx->refcount);
ctx->teedev = teedev;
INIT_LIST_HEAD(&ctx->list_shm);
filp->private_data = ctx;
@@ -68,19 +69,40 @@ err:
return rc;
}
-static int tee_release(struct inode *inode, struct file *filp)
+void teedev_ctx_get(struct tee_context *ctx)
{
- struct tee_context *ctx = filp->private_data;
- struct tee_device *teedev = ctx->teedev;
- struct tee_shm *shm;
+ if (ctx->releasing)
+ return;
+ kref_get(&ctx->refcount);
+}
+
+static void teedev_ctx_release(struct kref *ref)
+{
+ struct tee_context *ctx = container_of(ref, struct tee_context,
+ refcount);
+ ctx->releasing = true;
ctx->teedev->desc->ops->release(ctx);
- mutex_lock(&ctx->teedev->mutex);
- list_for_each_entry(shm, &ctx->list_shm, link)
- shm->ctx = NULL;
- mutex_unlock(&ctx->teedev->mutex);
kfree(ctx);
- tee_device_put(teedev);
+}
+
+void teedev_ctx_put(struct tee_context *ctx)
+{
+ if (ctx->releasing)
+ return;
+
+ kref_put(&ctx->refcount, teedev_ctx_release);
+}
+
+static void teedev_close_context(struct tee_context *ctx)
+{
+ tee_device_put(ctx->teedev);
+ teedev_ctx_put(ctx);
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+ teedev_close_context(filp->private_data);
return 0;
}
@@ -114,8 +136,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
if (data.flags)
return -EINVAL;
- data.id = -1;
-
shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
if (IS_ERR(shm))
return PTR_ERR(shm);
@@ -138,6 +158,43 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
return ret;
}
+static int
+tee_ioctl_shm_register(struct tee_context *ctx,
+ struct tee_ioctl_shm_register_data __user *udata)
+{
+ long ret;
+ struct tee_ioctl_shm_register_data data;
+ struct tee_shm *shm;
+
+ if (copy_from_user(&data, udata, sizeof(data)))
+ return -EFAULT;
+
+ /* Currently no input flags are supported */
+ if (data.flags)
+ return -EINVAL;
+
+ shm = tee_shm_register(ctx, data.addr, data.length,
+ TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ data.id = shm->id;
+ data.flags = shm->flags;
+ data.length = shm->size;
+
+ if (copy_to_user(udata, &data, sizeof(data)))
+ ret = -EFAULT;
+ else
+ ret = tee_shm_get_fd(shm);
+ /*
+ * When user space closes the file descriptor the shared memory
+ * is freed; if tee_shm_get_fd() failed, it is freed immediately
+ * instead.
+ */
+ tee_shm_put(shm);
+ return ret;
+}
+
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
size_t num_params,
struct tee_ioctl_param __user *uparams)
@@ -152,11 +209,11 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
return -EFAULT;
/* All unused attribute bits has to be zero */
- if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
return -EINVAL;
params[n].attr = ip.attr;
- switch (ip.attr) {
+ switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
break;
@@ -221,18 +278,6 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
return 0;
}
-static bool param_is_memref(struct tee_param *param)
-{
- switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- return true;
- default:
- return false;
- }
-}
-
static int tee_ioctl_open_session(struct tee_context *ctx,
struct tee_ioctl_buf_data __user *ubuf)
{
@@ -296,7 +341,7 @@ out:
if (params) {
/* Decrease ref count for all valid shared memory pointers */
for (n = 0; n < arg.num_params; n++)
- if (param_is_memref(params + n) &&
+ if (tee_param_is_memref(params + n) &&
params[n].u.memref.shm)
tee_shm_put(params[n].u.memref.shm);
kfree(params);
@@ -358,7 +403,7 @@ out:
if (params) {
/* Decrease ref count for all valid shared memory pointers */
for (n = 0; n < arg.num_params; n++)
- if (param_is_memref(params + n) &&
+ if (tee_param_is_memref(params + n) &&
params[n].u.memref.shm)
tee_shm_put(params[n].u.memref.shm);
kfree(params);
@@ -406,8 +451,8 @@ static int params_to_supp(struct tee_context *ctx,
struct tee_ioctl_param ip;
struct tee_param *p = params + n;
- ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
- switch (p->attr) {
+ ip.attr = p->attr;
+ switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
ip.a = p->u.value.a;
@@ -471,6 +516,10 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
if (!params)
return -ENOMEM;
+ rc = params_from_user(ctx, params, num_params, uarg->params);
+ if (rc)
+ goto out;
+
rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
if (rc)
goto out;
@@ -500,11 +549,11 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
return -EFAULT;
/* All unused attribute bits has to be zero */
- if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
return -EINVAL;
p->attr = ip.attr;
- switch (ip.attr) {
+ switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
/* Only out and in/out values can be updated */
@@ -586,6 +635,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return tee_ioctl_version(ctx, uarg);
case TEE_IOC_SHM_ALLOC:
return tee_ioctl_shm_alloc(ctx, uarg);
+ case TEE_IOC_SHM_REGISTER:
+ return tee_ioctl_shm_register(ctx, uarg);
case TEE_IOC_OPEN_SESSION:
return tee_ioctl_open_session(ctx, uarg);
case TEE_IOC_INVOKE:
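The TEE_IOC_SHM_REGISTER ioctl added above lets a client hand an existing user-space buffer to the driver instead of allocating one with TEE_IOC_SHM_ALLOC; on success the ioctl returns a dma-buf file descriptor and fills in the shared-memory id. A minimal user-space sketch, assuming the matching uapi definitions in <linux/tee.h> (struct tee_ioctl_shm_register_data with the addr/length/flags/id fields used above) and an already opened TEE device fd; register_shm() is an illustrative helper, not part of this patch:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

/* Register buf with the TEE driver; returns a dma-buf fd, or -1 on error. */
static int register_shm(int tee_fd, void *buf, size_t len, int *shm_id)
{
	struct tee_ioctl_shm_register_data data;
	int fd;

	memset(&data, 0, sizeof(data));
	data.addr = (uintptr_t)buf;	/* kernel rounds down to a page boundary */
	data.length = len;
	data.flags = 0;			/* no input flags are accepted yet */

	fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
	if (fd < 0)
		return -1;

	*shm_id = data.id;		/* id assigned by the kernel */
	return fd;			/* closing fd drops the registration */
}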
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 21cb6be8bce9..85d99d621603 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -21,68 +21,15 @@
#include <linux/mutex.h>
#include <linux/types.h>
-struct tee_device;
-
-/**
- * struct tee_shm - shared memory object
- * @teedev: device used to allocate the object
- * @ctx: context using the object, if NULL the context is gone
- * @link link element
- * @paddr: physical address of the shared memory
- * @kaddr: virtual address of the shared memory
- * @size: size of shared memory
- * @dmabuf: dmabuf used to for exporting to user space
- * @flags: defined by TEE_SHM_* in tee_drv.h
- * @id: unique id of a shared memory object on this device
- */
-struct tee_shm {
- struct tee_device *teedev;
- struct tee_context *ctx;
- struct list_head link;
- phys_addr_t paddr;
- void *kaddr;
- size_t size;
- struct dma_buf *dmabuf;
- u32 flags;
- int id;
-};
-
-struct tee_shm_pool_mgr;
-
-/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
- * @alloc: called when allocating shared memory
- * @free: called when freeing shared memory
- */
-struct tee_shm_pool_mgr_ops {
- int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
- size_t size);
- void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
-};
-
-/**
- * struct tee_shm_pool_mgr - shared memory manager
- * @ops: operations
- * @private_data: private data for the shared memory manager
- */
-struct tee_shm_pool_mgr {
- const struct tee_shm_pool_mgr_ops *ops;
- void *private_data;
-};
-
/**
* struct tee_shm_pool - shared memory pool
* @private_mgr: pool manager for shared memory only between kernel
* and secure world
* @dma_buf_mgr: pool manager for shared memory exported to user space
- * @destroy: called when destroying the pool
- * @private_data: private data for the pool
*/
struct tee_shm_pool {
- struct tee_shm_pool_mgr private_mgr;
- struct tee_shm_pool_mgr dma_buf_mgr;
- void (*destroy)(struct tee_shm_pool *pool);
- void *private_data;
+ struct tee_shm_pool_mgr *private_mgr;
+ struct tee_shm_pool_mgr *dma_buf_mgr;
};
#define TEE_DEVICE_FLAG_REGISTERED 0x1
@@ -126,4 +73,7 @@ int tee_shm_get_fd(struct tee_shm *shm);
bool tee_device_get(struct tee_device *teedev);
void tee_device_put(struct tee_device *teedev);
+void teedev_ctx_get(struct tee_context *ctx);
+void teedev_ctx_put(struct tee_context *ctx);
+
#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 37207ae2de7b..ed2d71c3337d 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -23,7 +23,6 @@
static void tee_shm_release(struct tee_shm *shm)
{
struct tee_device *teedev = shm->teedev;
- struct tee_shm_pool_mgr *poolm;
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, shm->id);
@@ -31,12 +30,32 @@ static void tee_shm_release(struct tee_shm *shm)
list_del(&shm->link);
mutex_unlock(&teedev->mutex);
- if (shm->flags & TEE_SHM_DMA_BUF)
- poolm = &teedev->pool->dma_buf_mgr;
- else
- poolm = &teedev->pool->private_mgr;
+ if (shm->flags & TEE_SHM_POOL) {
+ struct tee_shm_pool_mgr *poolm;
+
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ poolm = teedev->pool->dma_buf_mgr;
+ else
+ poolm = teedev->pool->private_mgr;
+
+ poolm->ops->free(poolm, shm);
+ } else if (shm->flags & TEE_SHM_REGISTER) {
+ size_t n;
+ int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+ if (rc)
+ dev_err(teedev->dev.parent,
+ "unregister shm %p failed: %d", shm, rc);
+
+ for (n = 0; n < shm->num_pages; n++)
+ put_page(shm->pages[n]);
+
+ kfree(shm->pages);
+ }
+
+ if (shm->ctx)
+ teedev_ctx_put(shm->ctx);
- poolm->ops->free(poolm, shm);
kfree(shm);
tee_device_put(teedev);
@@ -76,6 +95,10 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct tee_shm *shm = dmabuf->priv;
size_t size = vma->vm_end - vma->vm_start;
+ /* Refuse sharing shared memory provided by application */
+ if (shm->flags & TEE_SHM_REGISTER)
+ return -EINVAL;
+
return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
size, vma->vm_page_prot);
}
@@ -89,26 +112,20 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.mmap = tee_shm_op_mmap,
};
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx: Context that allocates the shared memory
- * @size: Requested size of shared memory
- * @flags: Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
+ struct tee_device *teedev,
+ size_t size, u32 flags)
{
- struct tee_device *teedev = ctx->teedev;
struct tee_shm_pool_mgr *poolm = NULL;
struct tee_shm *shm;
void *ret;
int rc;
+ if (ctx && ctx->teedev != teedev) {
+ dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!(flags & TEE_SHM_MAPPED)) {
dev_err(teedev->dev.parent,
"only mapped allocations supported\n");
@@ -135,13 +152,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_dev_put;
}
- shm->flags = flags;
+ shm->flags = flags | TEE_SHM_POOL;
shm->teedev = teedev;
shm->ctx = ctx;
if (flags & TEE_SHM_DMA_BUF)
- poolm = &teedev->pool->dma_buf_mgr;
+ poolm = teedev->pool->dma_buf_mgr;
else
- poolm = &teedev->pool->private_mgr;
+ poolm = teedev->pool->private_mgr;
rc = poolm->ops->alloc(poolm, shm, size);
if (rc) {
@@ -171,9 +188,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_rem;
}
}
- mutex_lock(&teedev->mutex);
- list_add_tail(&shm->link, &ctx->list_shm);
- mutex_unlock(&teedev->mutex);
+
+ if (ctx) {
+ teedev_ctx_get(ctx);
+ mutex_lock(&teedev->mutex);
+ list_add_tail(&shm->link, &ctx->list_shm);
+ mutex_unlock(&teedev->mutex);
+ }
return shm;
err_rem:
@@ -188,8 +209,145 @@ err_dev_put:
tee_device_put(teedev);
return ret;
}
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+ return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
+}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
+{
+ return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
+
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+ size_t length, u32 flags)
+{
+ struct tee_device *teedev = ctx->teedev;
+ const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+ struct tee_shm *shm;
+ void *ret;
+ int rc;
+ int num_pages;
+ unsigned long start;
+
+ if (flags != req_flags)
+ return ERR_PTR(-ENOTSUPP);
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ if (!teedev->desc->ops->shm_register ||
+ !teedev->desc->ops->shm_unregister) {
+ tee_device_put(teedev);
+ return ERR_PTR(-ENOTSUPP);
+ }
+
+ teedev_ctx_get(ctx);
+
+ shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+ if (!shm) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ shm->flags = flags | TEE_SHM_REGISTER;
+ shm->teedev = teedev;
+ shm->ctx = ctx;
+ shm->id = -1;
+ start = rounddown(addr, PAGE_SIZE);
+ shm->offset = addr - start;
+ shm->size = length;
+ num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+ shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+ if (!shm->pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+ if (rc > 0)
+ shm->num_pages = rc;
+ if (rc != num_pages) {
+ if (rc >= 0)
+ rc = -ENOMEM;
+ ret = ERR_PTR(rc);
+ goto err;
+ }
+
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+
+ if (shm->id < 0) {
+ ret = ERR_PTR(shm->id);
+ goto err;
+ }
+
+ rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+ shm->num_pages, start);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err;
+ }
+
+ if (flags & TEE_SHM_DMA_BUF) {
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tee_shm_dma_buf_ops;
+ exp_info.size = shm->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = shm;
+
+ shm->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(shm->dmabuf)) {
+ ret = ERR_CAST(shm->dmabuf);
+ teedev->desc->ops->shm_unregister(ctx, shm);
+ goto err;
+ }
+ }
+
+ mutex_lock(&teedev->mutex);
+ list_add_tail(&shm->link, &ctx->list_shm);
+ mutex_unlock(&teedev->mutex);
+
+ return shm;
+err:
+ if (shm) {
+ size_t n;
+
+ if (shm->id >= 0) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
+ if (shm->pages) {
+ for (n = 0; n < shm->num_pages; n++)
+ put_page(shm->pages[n]);
+ kfree(shm->pages);
+ }
+ }
+ kfree(shm);
+ teedev_ctx_put(ctx);
+ tee_device_put(teedev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_register);
+
/**
* tee_shm_get_fd() - Increase reference count and return file descriptor
* @shm: Shared memory handle
@@ -197,10 +355,9 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc);
*/
int tee_shm_get_fd(struct tee_shm *shm)
{
- u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
int fd;
- if ((shm->flags & req_flags) != req_flags)
+ if (!(shm->flags & TEE_SHM_DMA_BUF))
return -EINVAL;
fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
@@ -238,6 +395,8 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
*/
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
+ if (!(shm->flags & TEE_SHM_MAPPED))
+ return -EINVAL;
/* Check that we're in the range of the shm */
if ((char *)va < (char *)shm->kaddr)
return -EINVAL;
@@ -258,6 +417,8 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
*/
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
+ if (!(shm->flags & TEE_SHM_MAPPED))
+ return -EINVAL;
/* Check that we're in the range of the shm */
if (pa < shm->paddr)
return -EINVAL;
@@ -284,6 +445,8 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
*/
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
+ if (!(shm->flags & TEE_SHM_MAPPED))
+ return ERR_PTR(-EINVAL);
if (offs >= shm->size)
return ERR_PTR(-EINVAL);
return (char *)shm->kaddr + offs;
@@ -336,17 +499,6 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
/**
- * tee_shm_get_id() - Get id of a shared memory object
- * @shm: Shared memory handle
- * @returns id
- */
-int tee_shm_get_id(struct tee_shm *shm)
-{
- return shm->id;
-}
-EXPORT_SYMBOL_GPL(tee_shm_get_id);
-
-/**
* tee_shm_put() - Decrease reference count on a shared memory handle
* @shm: Shared memory handle
*/
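tee_shm_register() above only succeeds when the backend driver provides both the shm_register() and shm_unregister() hooks; otherwise it returns -ENOTSUPP before pinning any pages. A minimal sketch of how a backend could wire these up, assuming the callback signatures implied by the calls above and the struct tee_driver_ops table from <linux/tee_drv.h>; the my_* names are illustrative placeholders, not part of this patch:

static int my_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			   struct page **pages, size_t num_pages,
			   unsigned long start)
{
	/*
	 * Hand the pinned page list to the secure world here, e.g. by
	 * building a page-list buffer and passing its address in a
	 * registration call. Return 0 on success.
	 */
	return 0;
}

static int my_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	/* Tell the secure world that the buffer is no longer valid. */
	return 0;
}

static const struct tee_driver_ops my_tee_ops = {
	/* ... get_version, open, release, invoke_func, etc. ... */
	.shm_register = my_shm_register,
	.shm_unregister = my_shm_unregister,
};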
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
index fb4f8522a526..e6d4b9e4a864 100644
--- a/drivers/tee/tee_shm_pool.c
+++ b/drivers/tee/tee_shm_pool.c
@@ -44,49 +44,18 @@ static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
shm->kaddr = NULL;
}
+static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ gen_pool_destroy(poolm->private_data);
+ kfree(poolm);
+}
+
static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
.alloc = pool_op_gen_alloc,
.free = pool_op_gen_free,
+ .destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};
-static void pool_res_mem_destroy(struct tee_shm_pool *pool)
-{
- gen_pool_destroy(pool->private_mgr.private_data);
- gen_pool_destroy(pool->dma_buf_mgr.private_data);
-}
-
-static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
- struct tee_shm_pool_mem_info *info,
- int min_alloc_order)
-{
- size_t page_mask = PAGE_SIZE - 1;
- struct gen_pool *genpool = NULL;
- int rc;
-
- /*
- * Start and end must be page aligned
- */
- if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
- (info->size & page_mask))
- return -EINVAL;
-
- genpool = gen_pool_create(min_alloc_order, -1);
- if (!genpool)
- return -ENOMEM;
-
- gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
- rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
- -1);
- if (rc) {
- gen_pool_destroy(genpool);
- return rc;
- }
-
- mgr->private_data = genpool;
- mgr->ops = &pool_ops_generic;
- return 0;
-}
-
/**
* tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
* memory range
@@ -104,42 +73,109 @@ struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
struct tee_shm_pool_mem_info *dmabuf_info)
{
- struct tee_shm_pool *pool = NULL;
- int ret;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool) {
- ret = -ENOMEM;
- goto err;
- }
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
/*
* Create the pool for driver private shared memory
*/
- ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
- 3 /* 8 byte aligned */);
- if (ret)
- goto err;
+ rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
+ priv_info->size,
+ 3 /* 8 byte aligned */);
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
/*
* Create the pool for dma_buf shared memory
*/
- ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
- PAGE_SHIFT);
- if (ret)
+ rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
+ dmabuf_info->paddr,
+ dmabuf_info->size, PAGE_SHIFT);
+ if (IS_ERR(rc))
+ goto err_free_priv_mgr;
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc))
+ goto err_free_dmabuf_mgr;
+
+ return rc;
+
+err_free_dmabuf_mgr:
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+ tee_shm_pool_mgr_destroy(priv_mgr);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+ phys_addr_t paddr,
+ size_t size,
+ int min_alloc_order)
+{
+ const size_t page_mask = PAGE_SIZE - 1;
+ struct tee_shm_pool_mgr *mgr;
+ int rc;
+
+ /* Start and end must be page aligned */
+ if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+ return ERR_PTR(-EINVAL);
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->private_data = gen_pool_create(min_alloc_order, -1);
+ if (!mgr->private_data) {
+ rc = -ENOMEM;
goto err;
+ }
- pool->destroy = pool_res_mem_destroy;
- return pool;
+ gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
+ rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+ if (rc) {
+ gen_pool_destroy(mgr->private_data);
+ goto err;
+ }
+
+ mgr->ops = &pool_ops_generic;
+
+ return mgr;
err:
- if (ret == -ENOMEM)
- pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
- if (pool && pool->private_mgr.private_data)
- gen_pool_destroy(pool->private_mgr.private_data);
- kfree(pool);
- return ERR_PTR(ret);
+ kfree(mgr);
+
+ return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
+
+static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
+{
+ return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
+ mgr->ops->destroy_poolmgr;
+}
+
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+ struct tee_shm_pool_mgr *dmabuf_mgr)
+{
+ struct tee_shm_pool *pool;
+
+ if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
+ return ERR_PTR(-EINVAL);
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->private_mgr = priv_mgr;
+ pool->dma_buf_mgr = dmabuf_mgr;
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
/**
* tee_shm_pool_free() - Free a shared memory pool
@@ -150,7 +186,10 @@ EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
*/
void tee_shm_pool_free(struct tee_shm_pool *pool)
{
- pool->destroy(pool);
+ if (pool->private_mgr)
+ tee_shm_pool_mgr_destroy(pool->private_mgr);
+ if (pool->dma_buf_mgr)
+ tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
kfree(pool);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_free);
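With the pool manager split above, the reserved-memory pool is now assembled from two independently created managers. A driver that needs a different layout can do the same thing directly; a minimal sketch assuming page-aligned vaddr/paddr/size values for both regions, with my_build_pool() as an illustrative helper (cleanup uses tee_shm_pool_mgr_destroy(), as in the hunk above):

static struct tee_shm_pool *my_build_pool(unsigned long priv_va,
					  phys_addr_t priv_pa, size_t priv_sz,
					  unsigned long dma_va,
					  phys_addr_t dma_pa, size_t dma_sz)
{
	struct tee_shm_pool_mgr *priv_mgr, *dmabuf_mgr;
	struct tee_shm_pool *pool;

	priv_mgr = tee_shm_pool_mgr_alloc_res_mem(priv_va, priv_pa, priv_sz,
						  3 /* 8 byte aligned */);
	if (IS_ERR(priv_mgr))
		return ERR_CAST(priv_mgr);

	dmabuf_mgr = tee_shm_pool_mgr_alloc_res_mem(dma_va, dma_pa, dma_sz,
						    PAGE_SHIFT);
	if (IS_ERR(dmabuf_mgr)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return ERR_CAST(dmabuf_mgr);
	}

	pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(pool)) {
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
		tee_shm_pool_mgr_destroy(priv_mgr);
	}
	return pool;
}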
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index aa122340c717..d87a3b084457 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -339,7 +339,7 @@ config X86_PKG_TEMP_THERMAL
config INTEL_SOC_DTS_IOSF_CORE
tristate
- depends on X86
+ depends on X86 && PCI
select IOSF_MBI
help
This is becoming a common feature for Intel SoCs to expose the additional
@@ -349,7 +349,7 @@ config INTEL_SOC_DTS_IOSF_CORE
config INTEL_SOC_DTS_THERMAL
tristate "Intel SoCs DTS thermal driver"
- depends on X86
+ depends on X86 && PCI
select INTEL_SOC_DTS_IOSF_CORE
select THERMAL_WRITABLE_TRIPS
help
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c5547bd711db..6a8300108148 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -589,6 +589,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
ret = devm_request_threaded_irq(&pdev->dev, data->irq,
imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
0, "imx_thermal", data);
@@ -600,9 +603,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
- data->irq_enabled = true;
- data->mode = THERMAL_DEVICE_ENABLED;
-
return 0;
}
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 5f955af4671a..db49c33264d5 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -89,6 +89,7 @@
#define TSENS_TM_CRITICAL_INT_EN BIT(2)
#define TSENS_TM_UPPER_INT_EN BIT(1)
#define TSENS_TM_LOWER_INT_EN BIT(0)
+#define TSENS_TM_UPPER_LOWER_INT_DISABLE 0xffffffff
#define TSENS_TM_UPPER_INT_MASK(n) (((n) & 0xffff0000) >> 16)
#define TSENS_TM_LOWER_INT_MASK(n) ((n) & 0xffff)
@@ -269,8 +270,8 @@ struct tsens_tm_device {
uint32_t wd_bark_val;
int tsens_irq;
int tsens_critical_irq;
- void *tsens_addr;
- void *tsens_calib_addr;
+ void __iomem *tsens_addr;
+ void __iomem *tsens_calib_addr;
int tsens_len;
int calib_len;
struct resource *res_tsens_mem;
@@ -2079,6 +2080,7 @@ static int tsens_hw_init(struct tsens_tm_device *tmdev)
void __iomem *sensor_int_mask_addr;
unsigned int srot_val;
int crit_mask;
+ void __iomem *int_mask_addr;
if (!tmdev) {
pr_err("Invalid tsens device\n");
@@ -2104,6 +2106,10 @@ static int tsens_hw_init(struct tsens_tm_device *tmdev)
/*Update critical cycle monitoring*/
mb();
}
+ int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK
+ (tmdev->tsens_addr);
+ writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE,
+ int_mask_addr);
writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
TSENS_TM_INT_EN(tmdev->tsens_addr));
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 0a3ff5791488..7beef24035f8 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -188,6 +188,7 @@ static bool ocr_nodes_called;
static bool ocr_probed;
static bool ocr_reg_init_defer;
static bool hotplug_enabled;
+static bool interrupt_mode_enable;
static bool msm_thermal_probed;
static bool gfx_crit_phase_ctrl_enabled;
static bool gfx_warm_phase_ctrl_enabled;
@@ -4929,9 +4930,10 @@ static void __ref disable_msm_thermal(void)
static void interrupt_mode_init(void)
{
- if (!msm_thermal_probed)
+ if (!msm_thermal_probed) {
+ interrupt_mode_enable = true;
return;
-
+ }
if (polling_enabled) {
polling_enabled = 0;
create_sensor_zone_id_map();
@@ -7455,6 +7457,10 @@ static int msm_thermal_dev_probe(struct platform_device *pdev)
if (ret)
goto probe_exit;
msm_thermal_probed = true;
+ if (interrupt_mode_enable) {
+ interrupt_mode_init();
+ interrupt_mode_enable = false;
+ }
probe_exit:
return ret;
@@ -7591,6 +7597,7 @@ int __init msm_thermal_late_init(void)
}
}
msm_thermal_add_mx_nodes();
+ interrupt_mode_init();
create_cpu_topology_sysfs();
create_thermal_debugfs();
msm_thermal_add_bucket_info_nodes();
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..737635f0bec0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if ((instance->trip != params->trip_max_desired_temperature) ||
(!cdev_is_power_actor(instance->cdev)))
@@ -532,6 +533,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
instance->cdev->updated = false;
thermal_cdev_update(instance->cdev);
}
+ mutex_unlock(&tz->lock);
}
/**
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 534dd9136662..81b35aace9de 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
.get_temp = thermal_get_temp,
};
-#ifdef CONFIG_PM
-static int spear_thermal_suspend(struct device *dev)
+static int __maybe_unused spear_thermal_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
return 0;
}
-static int spear_thermal_resume(struct device *dev)
+static int __maybe_unused spear_thermal_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
spear_thermal_resume);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 20a41f7de76f..6713fd1958e7 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
+ .thaw_noirq = nhi_resume_noirq,
.restore_noirq = nhi_resume_noirq,
};
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index c01f45095877..4537abf6425d 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -226,7 +226,7 @@ config CYCLADES
config CYZ_INTR
bool "Cyclades-Z interrupt mode operation"
- depends on CYCLADES
+ depends on CYCLADES && PCI
help
The Cyclades-Z family of multiport cards allows 2 (two) driver op
modes: polling and interrupt. In polling mode, the driver will check
@@ -403,9 +403,16 @@ config PPC_EARLY_DEBUG_EHV_BC_HANDLE
config GOLDFISH_TTY
tristate "Goldfish TTY Driver"
depends on GOLDFISH
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
help
Console and system TTY driver for the Goldfish virtual platform.
+config GOLDFISH_TTY_EARLY_CONSOLE
+ bool
+ default y if GOLDFISH_TTY=y
+ select SERIAL_EARLYCON
+
config DA_TTY
bool "DA TTY"
depends on METAG_DA
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 1e332855b933..4686e93aaf94 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2017 Imagination Technologies Ltd.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -22,20 +23,27 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/goldfish.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/serial_core.h>
+#include <linux/of.h>
+/* Goldfish tty register's offsets */
enum {
- GOLDFISH_TTY_PUT_CHAR = 0x00,
- GOLDFISH_TTY_BYTES_READY = 0x04,
- GOLDFISH_TTY_CMD = 0x08,
-
- GOLDFISH_TTY_DATA_PTR = 0x10,
- GOLDFISH_TTY_DATA_LEN = 0x14,
- GOLDFISH_TTY_DATA_PTR_HIGH = 0x18,
-
- GOLDFISH_TTY_CMD_INT_DISABLE = 0,
- GOLDFISH_TTY_CMD_INT_ENABLE = 1,
- GOLDFISH_TTY_CMD_WRITE_BUFFER = 2,
- GOLDFISH_TTY_CMD_READ_BUFFER = 3,
+ GOLDFISH_TTY_REG_BYTES_READY = 0x04,
+ GOLDFISH_TTY_REG_CMD = 0x08,
+ GOLDFISH_TTY_REG_DATA_PTR = 0x10,
+ GOLDFISH_TTY_REG_DATA_LEN = 0x14,
+ GOLDFISH_TTY_REG_DATA_PTR_HIGH = 0x18,
+ GOLDFISH_TTY_REG_VERSION = 0x20,
+};
+
+/* Goldfish tty commands */
+enum {
+ GOLDFISH_TTY_CMD_INT_DISABLE = 0,
+ GOLDFISH_TTY_CMD_INT_ENABLE = 1,
+ GOLDFISH_TTY_CMD_WRITE_BUFFER = 2,
+ GOLDFISH_TTY_CMD_READ_BUFFER = 3,
};
struct goldfish_tty {
@@ -45,6 +53,8 @@ struct goldfish_tty {
u32 irq;
int opencount;
struct console console;
+ u32 version;
+ struct device *dev;
};
static DEFINE_MUTEX(goldfish_tty_lock);
@@ -53,38 +63,107 @@ static u32 goldfish_tty_line_count = 8;
static u32 goldfish_tty_current_line_count;
static struct goldfish_tty *goldfish_ttys;
-static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
+static void do_rw_io(struct goldfish_tty *qtty,
+ unsigned long address,
+ unsigned int count,
+ int is_write)
{
unsigned long irq_flags;
- struct goldfish_tty *qtty = &goldfish_ttys[line];
void __iomem *base = qtty->base;
+
spin_lock_irqsave(&qtty->lock, irq_flags);
- gf_write_ptr(buf, base + GOLDFISH_TTY_DATA_PTR,
- base + GOLDFISH_TTY_DATA_PTR_HIGH);
- writel(count, base + GOLDFISH_TTY_DATA_LEN);
- writel(GOLDFISH_TTY_CMD_WRITE_BUFFER, base + GOLDFISH_TTY_CMD);
+ gf_write_ptr((void *)address, base + GOLDFISH_TTY_REG_DATA_PTR,
+ base + GOLDFISH_TTY_REG_DATA_PTR_HIGH);
+ writel(count, base + GOLDFISH_TTY_REG_DATA_LEN);
+
+ if (is_write)
+ writel(GOLDFISH_TTY_CMD_WRITE_BUFFER,
+ base + GOLDFISH_TTY_REG_CMD);
+ else
+ writel(GOLDFISH_TTY_CMD_READ_BUFFER,
+ base + GOLDFISH_TTY_REG_CMD);
+
spin_unlock_irqrestore(&qtty->lock, irq_flags);
}
+static void goldfish_tty_rw(struct goldfish_tty *qtty,
+ const void *address_ptr,
+ unsigned int count,
+ int is_write)
+{
+ dma_addr_t dma_handle;
+ enum dma_data_direction dma_dir;
+ uintptr_t address;
+
+ address = (uintptr_t)address_ptr;
+ dma_dir = (is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (qtty->version > 0) {
+ /*
+ * Goldfish TTY for Ranchu platform uses
+ * physical addresses and DMA for read/write operations
+ */
+ uintptr_t address_end = address + count;
+
+ while (address < address_end) {
+ uintptr_t page_end = (address & PAGE_MASK) + PAGE_SIZE;
+ uintptr_t next = page_end < address_end ?
+ page_end : address_end;
+ uintptr_t avail = next - address;
+
+ /*
+ * Map the buffer's virtual address to the DMA address
+ * so the buffer can be accessed by the device.
+ */
+ dma_handle = dma_map_single(qtty->dev, (void *)address,
+ avail, dma_dir);
+
+ if (dma_mapping_error(qtty->dev, dma_handle)) {
+ dev_err(qtty->dev, "tty: DMA mapping error.\n");
+ return;
+ }
+ do_rw_io(qtty, dma_handle, avail, is_write);
+
+ /*
+ * Unmap the previously mapped region after
+ * the completion of the read/write operation.
+ */
+ dma_unmap_single(qtty->dev, dma_handle, avail, dma_dir);
+
+ address += avail;
+ }
+ } else {
+ /*
+ * Old style Goldfish TTY used on the Goldfish platform
+ * uses virtual addresses.
+ */
+ do_rw_io(qtty, address, count, is_write);
+ }
+
+}
+
+static void goldfish_tty_do_write(int line, const char *buf,
+ unsigned int count)
+{
+ struct goldfish_tty *qtty = &goldfish_ttys[line];
+
+ goldfish_tty_rw(qtty, buf, count, 1);
+}
+
static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
{
struct goldfish_tty *qtty = dev_id;
void __iomem *base = qtty->base;
- unsigned long irq_flags;
unsigned char *buf;
u32 count;
- count = readl(base + GOLDFISH_TTY_BYTES_READY);
+ count = readl(base + GOLDFISH_TTY_REG_BYTES_READY);
if (count == 0)
return IRQ_NONE;
count = tty_prepare_flip_string(&qtty->port, &buf, count);
- spin_lock_irqsave(&qtty->lock, irq_flags);
- gf_write_ptr(buf, base + GOLDFISH_TTY_DATA_PTR,
- base + GOLDFISH_TTY_DATA_PTR_HIGH);
- writel(count, base + GOLDFISH_TTY_DATA_LEN);
- writel(GOLDFISH_TTY_CMD_READ_BUFFER, base + GOLDFISH_TTY_CMD);
- spin_unlock_irqrestore(&qtty->lock, irq_flags);
+ goldfish_tty_rw(qtty, buf, count, 0);
+
tty_schedule_flip(&qtty->port);
return IRQ_HANDLED;
}
@@ -93,7 +172,7 @@ static int goldfish_tty_activate(struct tty_port *port, struct tty_struct *tty)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty,
port);
- writel(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_CMD);
+ writel(GOLDFISH_TTY_CMD_INT_ENABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
return 0;
}
@@ -101,12 +180,13 @@ static void goldfish_tty_shutdown(struct tty_port *port)
{
struct goldfish_tty *qtty = container_of(port, struct goldfish_tty,
port);
- writel(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_CMD);
+ writel(GOLDFISH_TTY_CMD_INT_DISABLE, qtty->base + GOLDFISH_TTY_REG_CMD);
}
static int goldfish_tty_open(struct tty_struct *tty, struct file *filp)
{
struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
+
return tty_port_open(&qtty->port, tty, filp);
}
@@ -136,7 +216,8 @@ static int goldfish_tty_chars_in_buffer(struct tty_struct *tty)
{
struct goldfish_tty *qtty = &goldfish_ttys[tty->index];
void __iomem *base = qtty->base;
- return readl(base + GOLDFISH_TTY_BYTES_READY);
+
+ return readl(base + GOLDFISH_TTY_REG_BYTES_READY);
}
static void goldfish_tty_console_write(struct console *co, const char *b,
@@ -227,7 +308,7 @@ static void goldfish_tty_delete_driver(void)
static int goldfish_tty_probe(struct platform_device *pdev)
{
struct goldfish_tty *qtty;
- int ret = -EINVAL;
+ int ret = -ENODEV;
struct resource *r;
struct device *ttydev;
void __iomem *base;
@@ -235,16 +316,22 @@ static int goldfish_tty_probe(struct platform_device *pdev)
unsigned int line;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL)
- return -EINVAL;
+ if (!r) {
+ pr_err("goldfish_tty: No MEM resource available!\n");
+ return -ENOMEM;
+ }
base = ioremap(r->start, 0x1000);
- if (base == NULL)
- pr_err("goldfish_tty: unable to remap base\n");
+ if (!base) {
+ pr_err("goldfish_tty: Unable to ioremap base!\n");
+ return -ENOMEM;
+ }
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (r == NULL)
+ if (!r) {
+ pr_err("goldfish_tty: No IRQ resource available!\n");
goto err_unmap;
+ }
irq = r->start;
@@ -255,13 +342,17 @@ static int goldfish_tty_probe(struct platform_device *pdev)
else
line = pdev->id;
- if (line >= goldfish_tty_line_count)
- goto err_create_driver_failed;
+ if (line >= goldfish_tty_line_count) {
+ pr_err("goldfish_tty: Reached maximum tty number of %d.\n",
+ goldfish_tty_current_line_count);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
if (goldfish_tty_current_line_count == 0) {
ret = goldfish_tty_create_driver();
if (ret)
- goto err_create_driver_failed;
+ goto err_unlock;
}
goldfish_tty_current_line_count++;
@@ -271,17 +362,45 @@ static int goldfish_tty_probe(struct platform_device *pdev)
qtty->port.ops = &goldfish_port_ops;
qtty->base = base;
qtty->irq = irq;
+ qtty->dev = &pdev->dev;
+
+ /*
+ * Goldfish TTY device used by the Goldfish emulator
+ * should identify itself with 0, forcing the driver
+ * to use virtual addresses. Goldfish TTY device
+ * on Ranchu emulator (qemu2) returns 1 here and
+ * driver will use physical addresses.
+ */
+ qtty->version = readl(base + GOLDFISH_TTY_REG_VERSION);
+
+ /*
+ * Goldfish TTY device on Ranchu emulator (qemu2)
+ * will use DMA for read/write IO operations.
+ */
+ if (qtty->version > 0) {
+ /*
+ * Initialize dma_mask to 32-bits.
+ */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto err_dec_line_count;
+ }
+ }
- writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD);
+ writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_REG_CMD);
ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED,
- "goldfish_tty", qtty);
- if (ret)
- goto err_request_irq_failed;
-
+ "goldfish_tty", qtty);
+ if (ret) {
+ pr_err("goldfish_tty: No IRQ available!\n");
+ goto err_dec_line_count;
+ }
ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
- line, &pdev->dev);
+ line, &pdev->dev);
if (IS_ERR(ttydev)) {
ret = PTR_ERR(ttydev);
goto err_tty_register_device_failed;
@@ -300,12 +419,12 @@ static int goldfish_tty_probe(struct platform_device *pdev)
return 0;
err_tty_register_device_failed:
- free_irq(irq, pdev);
-err_request_irq_failed:
+ free_irq(irq, qtty);
+err_dec_line_count:
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
-err_create_driver_failed:
+err_unlock:
mutex_unlock(&goldfish_tty_lock);
err_unmap:
iounmap(base);
@@ -330,6 +449,32 @@ static int goldfish_tty_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
+static void gf_early_console_putchar(struct uart_port *port, int ch)
+{
+ __raw_writel(ch, port->membase);
+}
+
+static void gf_early_write(struct console *con, const char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, gf_early_console_putchar);
+}
+
+static int __init gf_earlycon_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = gf_early_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(early_gf_tty, "google,goldfish-tty", gf_earlycon_setup);
+#endif
+
static const struct of_device_id goldfish_tty_of_match[] = {
{ .compatible = "google,goldfish-tty", },
{},
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index fa816b7193b6..11725422dacb 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -323,6 +323,7 @@ void xen_console_resume(void)
}
}
+#ifdef CONFIG_HVC_XEN_FRONTEND
static void xencons_disconnect_backend(struct xencons_info *info)
{
if (info->irq > 0)
@@ -363,7 +364,6 @@ static int xen_console_remove(struct xencons_info *info)
return 0;
}
-#ifdef CONFIG_HVC_XEN_FRONTEND
static int xencons_remove(struct xenbus_device *dev)
{
return xen_console_remove(dev_get_drvdata(&dev->dev));
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 9aff37186246..6060c3e8925e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@ struct gsm_dlci {
struct mutex mutex;
/* Link layer */
+ int mode;
+#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
@@ -1380,7 +1383,13 @@ retry:
ctrl->data = data;
ctrl->len = clen;
gsm->pending_cmd = ctrl;
- gsm->cretries = gsm->n2;
+
+ /* If DLCI0 is in ADM mode skip retries, it won't respond */
+ if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+ gsm->cretries = 1;
+ else
+ gsm->cretries = gsm->n2;
+
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
gsm_control_transmit(gsm, ctrl);
spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1467,6 +1476,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* in which case an opening port goes back to closed and a closing port
* is simply put into closed state (any further frames from the other
* end will get a DM response)
+ *
+ * Some control dlci can stay in ADM mode with other dlci working just
+ * fine. In that case we can just keep the control dlci open after the
+ * DLCI_OPENING retries time out.
*/
static void gsm_dlci_t1(unsigned long data)
@@ -1480,8 +1493,16 @@ static void gsm_dlci_t1(unsigned long data)
if (dlci->retries) {
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
- } else
+ } else if (!dlci->addr && gsm->control == (DM | PF)) {
+ if (debug & 8)
+ pr_info("DLCI %d opening in ADM mode.\n",
+ dlci->addr);
+ dlci->mode = DLCI_MODE_ADM;
+ gsm_dlci_open(dlci);
+ } else {
gsm_dlci_close(dlci);
+ }
+
break;
case DLCI_CLOSING:
dlci->retries--;
@@ -1499,8 +1520,8 @@ static void gsm_dlci_t1(unsigned long data)
* @dlci: DLCI to open
*
* Commence opening a DLCI from the Linux side. We issue SABM messages
- * to the modem which should then reply with a UA, at which point we
- * will move into open state. Opening is done asynchronously with retry
+ * to the modem which should then reply with a UA or ADM, at which point
+ * we will move into open state. Opening is done asynchronously with retry
* running off timers and the responses.
*/
@@ -2870,11 +2891,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ struct gsm_mux *gsm = dlci->gsm;
+
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
if (debug & 2)
return 1;
+
+ /*
+ * Basic mode with control channel in ADM mode may not respond
+ * to CMD_MSC at all and modem_rx is empty.
+ */
+ if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+ !dlci->modem_rx)
+ return 1;
+
return dlci->modem_rx & TIOCM_CD;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 41dda25da049..190e5dc15738 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2238,6 +2238,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
}
if (tty_hung_up_p(file))
break;
+ /*
+ * Abort readers for ttys which never actually
+ * get hung up. See __tty_hangup().
+ */
+ if (test_bit(TTY_HUPPING, &tty->flags))
+ break;
if (!timeout)
break;
if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index e8dd296fb25b..c4383573cf66 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -608,6 +608,10 @@ static int omap_8250_startup(struct uart_port *port)
up->lsr_saved_flags = 0;
up->msr_saved_flags = 0;
+ /* Disable DMA for console UART */
+ if (uart_console(port))
+ up->dma = NULL;
+
if (up->dma) {
ret = serial8250_request_dma(up);
if (ret) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 7025f47fa284..746c76b358a0 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5300,6 +5300,17 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
/*
+ * BrainBoxes UC-260
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ /*
* Perle PCI-RAS cards
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 6412f1455beb..6f4c180aadc1 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -372,7 +372,7 @@ config SERIAL_8250_MID
tristate "Support for serial ports on Intel MID platforms"
depends on SERIAL_8250 && PCI
select HSU_DMA if SERIAL_8250_DMA
- select HSU_DMA_PCI if X86_INTEL_MID
+ select HSU_DMA_PCI if (HSU_DMA && X86_INTEL_MID)
select RATIONAL
help
Selecting this option will enable handling of the extra features
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 53e4d5056db7..e0277cf0bf58 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1783,6 +1783,7 @@ static void atmel_get_ip_name(struct uart_port *port)
switch (version) {
case 0x302:
case 0x10213:
+ case 0x10302:
dev_dbg(port->dev, "This version is usart\n");
atmel_port->is_usart = true;
break;
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 5da2f1406546..064494366f01 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -3,7 +3,7 @@
* MSM 7k High speed uart driver
*
* Copyright (c) 2008 Google Inc.
- * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
* Modified: Nick Pelly <npelly@google.com>
*
* All source code in this file is licensed under the following license
@@ -66,6 +66,7 @@
#include <linux/msm-sps.h>
#include <linux/platform_data/msm_serial_hs.h>
#include <linux/msm-bus.h>
+#include <soc/qcom/boot_stats.h>
#include "msm_serial_hs_hwreg.h"
#define UART_SPS_CONS_PERIPHERAL 0
@@ -2656,6 +2657,7 @@ static int msm_hs_startup(struct uart_port *uport)
int ret;
int rfr_level;
unsigned long flags;
+ u32 irq_type;
unsigned int data;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct circ_buf *tx_buf = &uport->state->xmit;
@@ -2676,8 +2678,11 @@ static int msm_hs_startup(struct uart_port *uport)
msm_hs_resource_vote(msm_uport);
if (is_use_low_power_wakeup(msm_uport)) {
+ irq_type = irq_get_trigger_type(msm_uport->wakeup.irq);
+ if (irq_type == IRQ_TYPE_NONE)
+ irq_type = IRQ_TYPE_EDGE_FALLING;
ret = request_irq(msm_uport->wakeup.irq, msm_hs_wakeup_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ irq_type | IRQF_ONESHOT,
"msm_hs_wakeup", msm_uport);
if (unlikely(ret)) {
MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
@@ -3413,6 +3418,7 @@ static int msm_hs_probe(struct platform_device *pdev)
struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
unsigned long data;
char name[30];
+ char boot_marker[40];
if (pdev->dev.of_node) {
dev_dbg(&pdev->dev, "device tree enabled\n");
@@ -3438,6 +3444,10 @@ static int msm_hs_probe(struct platform_device *pdev)
pdev->dev.platform_data = pdata;
}
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER MSM HS-UART_%d Init", pdev->id);
+ place_marker(boot_marker);
+
if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
dev_err(&pdev->dev, "Invalid plaform device ID = %d\n",
pdev->id);
@@ -3671,6 +3681,9 @@ static int msm_hs_probe(struct platform_device *pdev)
if (!ret) {
msm_hs_clk_bus_unvote(msm_uport);
msm_serial_hs_rt_init(uport);
+ snprintf(boot_marker, sizeof(boot_marker),
+ "M - DRIVER MSM HS-UART_%d Ready", pdev->id);
+ place_marker(boot_marker);
return ret;
}
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index fcf803ffad19..cdd2f942317c 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- if (PTR_ERR(clk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(clk);
+ if (ret == -EPROBE_DEFER)
goto err_out;
- }
+ uartclk = 0;
+ } else {
+ clk_prepare_enable(clk);
+ uartclk = clk_get_rate(clk);
+ }
+
+ if (!uartclk) {
dev_notice(&pdev->dev, "Using default clock frequency\n");
uartclk = s->chip->freq_std;
- } else
- uartclk = clk_get_rate(clk);
+ }
/* Check input frequency */
if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 3eb57eb532f1..02147361eaa9 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -20,6 +20,7 @@
#include <linux/gpio/consumer.h>
#include <linux/termios.h>
#include <linux/serial_core.h>
+#include <linux/module.h>
#include "serial_mctrl_gpio.h"
@@ -193,6 +194,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
return gpios;
}
+EXPORT_SYMBOL_GPL(mctrl_gpio_init);
void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
@@ -247,3 +249,6 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
disable_irq(gpios->irq[i]);
}
}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 80d0ffe7abc1..8dd822feb972 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -847,6 +847,8 @@ static void sci_receive_chars(struct uart_port *port)
/* Tell the rest of the system the news. New characters! */
tty_flip_buffer_push(tport);
} else {
+ /* TTY buffers full; read from RX reg to prevent lockup */
+ serial_port_in(port, SCxRDR);
serial_port_in(port, SCxSR); /* dummy read */
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
@@ -1455,7 +1457,16 @@ static void sci_free_dma(struct uart_port *port)
if (s->chan_rx)
sci_rx_dma_release(s, false);
}
-#else
+
+static void sci_flush_buffer(struct uart_port *port)
+{
+ /*
+ * In uart_flush_buffer(), the xmit circular buffer has just been
+ * cleared, so we have to reset tx_dma_len accordingly.
+ */
+ to_sci_port(port)->tx_dma_len = 0;
+}
+#else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port)
{
}
@@ -1463,7 +1474,9 @@ static inline void sci_request_dma(struct uart_port *port)
static inline void sci_free_dma(struct uart_port *port)
{
}
-#endif
+
+#define sci_flush_buffer NULL
+#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
@@ -2203,6 +2216,7 @@ static struct uart_ops sci_uart_ops = {
.break_ctl = sci_break_ctl,
.startup = sci_startup,
.shutdown = sci_shutdown,
+ .flush_buffer = sci_flush_buffer,
.set_termios = sci_set_termios,
.pm = sci_pm,
.type = sci_type,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 1bb629ab8ecc..198451fa9e5d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -702,6 +702,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
return;
}
+ /*
+ * Some console devices aren't actually hung up for technical and
+ * historical reasons, which can lead to indefinite interruptible
+ * sleep in n_tty_read(). The following explicitly tells
+ * n_tty_read() to abort readers.
+ */
+ set_bit(TTY_HUPPING, &tty->flags);
+
/* inuse_filps is protected by the single tty lock,
this really needs to change if we want to flush the
workqueue with the lock held */
@@ -757,6 +765,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
* can't yet guarantee all that.
*/
set_bit(TTY_HUPPED, &tty->flags);
+ clear_bit(TTY_HUPPING, &tty->flags);
tty_unlock(tty);
if (f)
@@ -1694,6 +1703,8 @@ static void release_tty(struct tty_struct *tty, int idx)
if (tty->link)
tty->link->port->itty = NULL;
tty_buffer_cancel_work(tty->port);
+ if (tty->link)
+ tty_buffer_cancel_work(tty->link->port);
tty_kref_put(tty->link);
tty_kref_put(tty);
@@ -3143,7 +3154,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
kref_init(&tty->kref);
tty->magic = TTY_MAGIC;
- tty_ldisc_init(tty);
+ if (tty_ldisc_init(tty)) {
+ kfree(tty);
+ return NULL;
+ }
tty->session = NULL;
tty->pgrp = NULL;
mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 9bee25cfa0be..d9e013dc2c08 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -168,12 +168,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
return ERR_CAST(ldops);
}
- ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
- if (ld == NULL) {
- put_ldops(ldops);
- return ERR_PTR(-ENOMEM);
- }
-
+ /*
+ * There is no way to handle allocation failure of only 16 bytes.
+ * Let's simplify error handling and save more memory.
+ */
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
ld->ops = ldops;
ld->tty = tty;
@@ -804,12 +803,13 @@ void tty_ldisc_release(struct tty_struct *tty)
* the tty structure is not completely set up when this call is made.
*/
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
- panic("n_tty: init_tty");
+ return PTR_ERR(ld);
tty->ldisc = ld;
+ return 0;
}
/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index e4f69bddcfb1..ff3286fc22d8 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1312,6 +1312,11 @@ static void csi_m(struct vc_data *vc)
case 3:
vc->vc_italic = 1;
break;
+ case 21:
+ /*
+ * No console drivers support double underline, so
+ * convert it to a single underline.
+ */
case 4:
vc->vc_underline = 1;
break;
@@ -1348,7 +1353,6 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 1;
break;
- case 21:
case 22:
vc->vc_intensity = 1;
break;
@@ -1725,7 +1729,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
default_attr(vc);
update_attr(vc);
- vc->vc_tab_stop[0] = 0x01010100;
+ vc->vc_tab_stop[0] =
vc->vc_tab_stop[1] =
vc->vc_tab_stop[2] =
vc->vc_tab_stop[3] =
@@ -1769,7 +1773,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_pos -= (vc->vc_x << 1);
while (vc->vc_x < vc->vc_cols - 1) {
vc->vc_x++;
- if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31)))
+ if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))
break;
}
vc->vc_pos += (vc->vc_x << 1);
@@ -1829,7 +1833,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
lf(vc);
return;
case 'H':
- vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31));
+ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));
return;
case 'Z':
respond_ID(tty);
@@ -2022,7 +2026,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
return;
case 'g':
if (!vc->vc_par[0])
- vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31));
+ vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));
else if (vc->vc_par[0] == 3) {
vc->vc_tab_stop[0] =
vc->vc_tab_stop[1] =
diff --git a/drivers/uio/msm_sharedmem/msm_sharedmem.c b/drivers/uio/msm_sharedmem/msm_sharedmem.c
index b10c40b3e1fc..84623c9b41d3 100644
--- a/drivers/uio/msm_sharedmem/msm_sharedmem.c
+++ b/drivers/uio/msm_sharedmem/msm_sharedmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,24 @@ static int sharedmem_mmap(struct uio_info *info, struct vm_area_struct *vma)
return result;
}
+static void free_shared_ram_perms(u32 client_id, phys_addr_t addr, u32 size)
+{
+ int ret;
+ u32 source_vmlist[2] = {VMID_HLOS, VMID_MSS_MSA};
+ int dest_vmids[1] = {VMID_HLOS};
+ int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
+
+ if (client_id != MPSS_RMTS_CLIENT_ID)
+ return;
+
+ ret = hyp_assign_phys(addr, size, source_vmlist, 2, dest_vmids,
+ dest_perms, 1);
+ if (ret != 0) {
+ pr_err("hyp_assign_phys failed IPA=0x016%pa size=%u err=%d\n",
+ &addr, size, ret);
+ }
+}
+
/* Setup the shared ram permissions.
* This function currently supports the mpss client only.
*/
@@ -184,6 +202,17 @@ out:
return ret;
}
+static void msm_sharedmem_shutdown(struct platform_device *pdev)
+{
+ struct uio_info *info = dev_get_drvdata(&pdev->dev);
+
+ phys_addr_t shared_mem_addr = info->mem[0].addr;
+ u32 shared_mem_size = info->mem[0].size;
+
+ free_shared_ram_perms(MPSS_RMTS_CLIENT_ID, shared_mem_addr,
+ shared_mem_size);
+}
+
static int msm_sharedmem_remove(struct platform_device *pdev)
{
struct uio_info *info = dev_get_drvdata(&pdev->dev);
@@ -202,6 +231,7 @@ MODULE_DEVICE_TABLE(of, msm_sharedmem_of_match);
static struct platform_driver msm_sharedmem_driver = {
.probe = msm_sharedmem_probe,
.remove = msm_sharedmem_remove,
+ .shutdown = msm_sharedmem_shutdown,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 939c6ad71068..57ee43512992 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -851,7 +851,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci)
{
ci_hdrc_gadget_destroy(ci);
ci_hdrc_host_destroy(ci);
- if (ci->is_otg)
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
ci_hdrc_otg_destroy(ci);
}
@@ -951,27 +951,35 @@ static int ci_hdrc_probe(struct platform_device *pdev)
/* initialize role(s) before the interrupt is requested */
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
ret = ci_hdrc_host_init(ci);
- if (ret)
- dev_info(dev, "doesn't support host\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support host\n");
+ else
+ goto deinit_phy;
+ }
}
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = ci_hdrc_gadget_init(ci);
- if (ret)
- dev_info(dev, "doesn't support gadget\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support gadget\n");
+ else
+ goto deinit_host;
+ }
}
if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
ret = -ENODEV;
- goto deinit_phy;
+ goto deinit_gadget;
}
if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
ret = ci_hdrc_otg_init(ci);
if (ret) {
dev_err(dev, "init otg fails, ret = %d\n", ret);
- goto stop;
+ goto deinit_gadget;
}
}
@@ -1036,7 +1044,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci_extcon_unregister(ci);
stop:
- ci_role_destroy(ci);
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
+ ci_hdrc_otg_destroy(ci);
+deinit_gadget:
+ ci_hdrc_gadget_destroy(ci);
+deinit_host:
+ ci_hdrc_host_destroy(ci);
deinit_phy:
ci_usb_phy_exit(ci);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index c3f97972f61a..dbc9b1a41d67 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -157,7 +157,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = {
static const unsigned short high_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1024,
- [USB_ENDPOINT_XFER_BULK] = 512,
+
+ /* Bulk should be 512, but some devices use 1024: we will warn below */
+ [USB_ENDPOINT_XFER_BULK] = 1024,
[USB_ENDPOINT_XFER_INT] = 1024,
};
static const unsigned short super_speed_maxpacket_maxes[4] = {
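
The table above is indexed by endpoint transfer type and gives the largest wMaxPacketSize the core accepts for a high-speed endpoint; the bulk limit is relaxed from the spec's 512 to 1024 because some devices report the larger value, and a warning is issued elsewhere in the descriptor parser. A rough userspace sketch of that table-driven clamp follows, with the limits copied from the hunk above but the helper name and warning text invented for illustration:

#include <stdio.h>

enum { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };

/* Per-type limits for high speed, as in the hunk above: bulk is nominally
 * 512 but relaxed to 1024 because some devices use the larger value. */
static const unsigned short high_speed_maxpacket_maxes[4] = {
	[XFER_CONTROL] = 64,
	[XFER_ISOC]    = 1024,
	[XFER_BULK]    = 1024,
	[XFER_INT]     = 1024,
};

/* Returns the value to use, warning if the descriptor exceeds the limit. */
static unsigned short check_maxpacket(int xfer_type, unsigned short maxp)
{
	unsigned short limit = high_speed_maxpacket_maxes[xfer_type];

	if (maxp > limit) {
		fprintf(stderr, "invalid wMaxPacketSize %u, clamping to %u\n",
			maxp, limit);
		return limit;
	}
	return maxp;
}

int main(void)
{
	printf("%u\n", check_maxpacket(XFER_BULK, 1024));   /* accepted */
	printf("%u\n", check_maxpacket(XFER_CONTROL, 512)); /* clamped to 64 */
	return 0;
}
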
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ee33c0d796b5..55322469084d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1495,9 +1495,10 @@ int usb_resume(struct device *dev, pm_message_t msg)
* Some buses would like to keep their devices in suspend
* state after system resume. Their resume happen when
* a remote wakeup is detected or interface driver start
- * I/O.
+ * I/O. When the system is restoring from hibernation,
+ * however, make sure all the devices are resumed.
*/
- if (udev->bus->skip_resume)
+ if (udev->bus->skip_resume && msg.event != PM_EVENT_RESTORE)
return 0;
/* For all calls, take the device back to full power and
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 72d1109f13eb..619e5446cbe8 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -242,8 +242,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
if (!udev->parent)
rc = hcd_bus_suspend(udev, msg);
- /* Non-root devices don't need to do anything for FREEZE or PRETHAW */
- else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+ /*
+ * Non-root USB2 devices don't need to do anything for FREEZE
+ * or PRETHAW. USB3 devices don't support global suspend and
+ * need to be selectively suspended.
+ */
+ else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
+ && (udev->speed < USB_SPEED_SUPER))
rc = 0;
else
rc = usb_port_suspend(udev, msg);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 592f45e6dbac..8d732e9f74fa 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2242,25 +2242,26 @@ usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
}
dma_addr_t
-usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
- struct usb_host_endpoint *ep)
+usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!HCD_RH_RUNNING(hcd))
return 0;
- return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+ return hcd->driver->get_dcba_dma_addr(hcd, udev);
}
-int usb_hcd_get_controller_id(struct usb_device *udev)
+dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!HCD_RH_RUNNING(hcd))
- return -EINVAL;
+ return 0;
- return hcd->driver->get_core_id(hcd);
+ return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
}
#ifdef CONFIG_PM
@@ -2395,6 +2396,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
+ pm_wakeup_event(&hcd->self.root_hub->dev, 0);
set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5df314dd5f3c..cc7ab666d650 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -643,12 +643,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum)
{
struct usb_hub *hub;
+ struct usb_port *port_dev;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
+ port_dev = hub->ports[portnum - 1];
+ if (port_dev && port_dev->child)
+ pm_wakeup_event(&port_dev->child->dev, 0);
+
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
@@ -3372,8 +3377,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
- if (status == 0 && !port_is_suspended(hub, portstatus))
+ if (status == 0 && !port_is_suspended(hub, portstatus)) {
+ if (portchange & USB_PORT_STAT_C_SUSPEND)
+ pm_wakeup_event(&udev->dev, 0);
goto SuspendCleared;
+ }
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 8e641b5893ed..29adabdb305f 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -147,6 +147,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
+
kfree(dr);
return ret;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index c05c4f877750..40ce175655e6 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* HP v222w 16GB Mini USB Drive */
+ { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -225,8 +228,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair K70 RGB */
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Corsair Strafe RGB */
- { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
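
The entries above match devices by vendor/product ID and OR together per-device quirk flags; USB_QUIRK_DELAY_CTRL_MSG then makes usb_control_msg() sleep 200 ms after each control transfer, as shown in the message.c hunk earlier. A small standalone sketch of a VID/PID-keyed quirk lookup, with the table layout and flag values invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define QUIRK_DELAY_INIT	(1u << 0)
#define QUIRK_DELAY_CTRL_MSG	(1u << 1)

struct quirk_entry {
	uint16_t vid, pid;
	uint32_t flags;
};

static const struct quirk_entry quirk_list[] = {
	{ 0x1b1c, 0x1b13, QUIRK_DELAY_INIT },				/* Corsair K70 RGB */
	{ 0x1b1c, 0x1b20, QUIRK_DELAY_INIT | QUIRK_DELAY_CTRL_MSG },	/* Corsair Strafe RGB */
	{ 0x03f0, 0x3f40, QUIRK_DELAY_INIT },				/* HP v222w */
};

static uint32_t lookup_quirks(uint16_t vid, uint16_t pid)
{
	size_t i;

	for (i = 0; i < sizeof(quirk_list) / sizeof(quirk_list[0]); i++)
		if (quirk_list[i].vid == vid && quirk_list[i].pid == pid)
			return quirk_list[i].flags;
	return 0;
}

int main(void)
{
	uint32_t q = lookup_quirks(0x1b1c, 0x1b20);

	if (q & QUIRK_DELAY_CTRL_MSG)
		printf("device needs a delay after each control message\n");
	return 0;
}
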
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 65bf86f18a34..062677f8e91d 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -697,24 +697,25 @@ usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
}
EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
-dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
- struct usb_host_endpoint *ep)
+dma_addr_t
+usb_get_dcba_dma_addr(struct usb_device *dev)
{
if (dev->state == USB_STATE_NOTATTACHED)
return 0;
- return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+ return usb_hcd_get_dcba_dma_addr(dev);
}
-EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+EXPORT_SYMBOL(usb_get_dcba_dma_addr);
-int usb_get_controller_id(struct usb_device *dev)
+dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+ struct usb_host_endpoint *ep)
{
if (dev->state == USB_STATE_NOTATTACHED)
- return -EINVAL;
+ return 0;
- return usb_hcd_get_controller_id(dev);
+ return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
}
-EXPORT_SYMBOL(usb_get_controller_id);
+EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
/*-------------------------------------------------------------------*/
/*
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 571c21727ff9..85fb6226770c 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1402,8 +1402,12 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
if (count > 250)
dev_err(hsotg->dev,
"Connection id status change timed out\n");
- hsotg->op_state = OTG_STATE_A_HOST;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_disconnect(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ hsotg->op_state = OTG_STATE_A_HOST;
/* Initialize the Core for Host mode */
dwc2_core_init(hsotg, false, -1);
dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 0744b14e120b..3191825710af 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1435,8 +1435,26 @@ err_usb2phy_init:
return ret;
}
+static int dwc3_pm_restore(struct device *dev)
+{
+ /*
+ * Set the core as runtime active to prevent the runtime
+ * PM ops from being called before the PM restore is completed.
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops dwc3_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ .suspend = dwc3_suspend,
+ .resume = dwc3_resume,
+ .freeze = dwc3_suspend,
+ .thaw = dwc3_pm_restore,
+ .poweroff = dwc3_suspend,
+ .restore = dwc3_pm_restore,
};
#define DWC3_PM_OPS &(dwc3_dev_pm_ops)
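
The hunk above replaces the single SET_SYSTEM_SLEEP_PM_OPS() macro with explicit callbacks so the hibernation phases (freeze/thaw/poweroff/restore) can take a different path from ordinary suspend/resume; freeze and poweroff reuse the suspend handler while thaw and restore go through dwc3_pm_restore(). A toy, compilable illustration of wiring an ops table that way, with stubbed handlers and names invented for the example:

#include <stdio.h>

struct pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
	int (*freeze)(void);
	int (*thaw)(void);
	int (*poweroff)(void);
	int (*restore)(void);
};

static int demo_suspend(void) { puts("suspend: save state, gate clocks"); return 0; }
static int demo_resume(void)  { puts("resume: ungate clocks, restore state"); return 0; }

/* Hibernation exit needs a fuller re-init than a plain resume, analogous
 * to dwc3_pm_restore() marking the device runtime-active again. */
static int demo_restore(void) { puts("restore: re-init hardware, mark runtime active"); return 0; }

static const struct pm_ops demo_pm_ops = {
	.suspend  = demo_suspend,
	.resume   = demo_resume,
	.freeze   = demo_suspend,	/* freeze reuses the suspend path */
	.thaw     = demo_restore,
	.poweroff = demo_suspend,
	.restore  = demo_restore,
};

int main(void)
{
	/* Simulate the hibernation sequence: freeze -> poweroff -> restore */
	demo_pm_ops.freeze();
	demo_pm_ops.poweroff();
	demo_pm_ops.restore();
	return 0;
}
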
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d2423d..03a926ebf34b 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -112,6 +112,10 @@ static int kdwc3_probe(struct platform_device *pdev)
dev->dma_mask = &kdwc3_dma_mask;
kdwc->clk = devm_clk_get(kdwc->dev, "usb");
+ if (IS_ERR(kdwc->clk)) {
+ dev_err(kdwc->dev, "unable to get usb clock\n");
+ return PTR_ERR(kdwc->clk);
+ }
error = clk_prepare_enable(kdwc->clk);
if (error < 0) {
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 2cd600a58fd7..f94d0ba2f966 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1908,6 +1908,18 @@ static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
dwc3_core_init(dwc);
/* Re-configure event buffers */
dwc3_event_buffers_setup(dwc);
+
+ /* Get initial P3 status and enable IN_P3 event */
+ val = dwc3_msm_read_reg_field(mdwc->base,
+ DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+ atomic_set(&mdwc->in_p3, val == DWC3_LINK_STATE_U3);
+ dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
+ PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
+ if (mdwc->otg_state == OTG_STATE_A_HOST) {
+ dev_dbg(mdwc->dev, "%s: set the core in host mode\n",
+ __func__);
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ }
}
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
@@ -1993,7 +2005,7 @@ static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
bool perf_mode);
-static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool hibernation)
{
int ret, i;
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
@@ -2115,7 +2127,8 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
clk_disable_unprepare(mdwc->xo_clk);
/* Perform controller power collapse */
- if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
+ if ((!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) ||
+ hibernation) {
mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
dwc3_msm_config_gdsc(mdwc, 0);
@@ -2254,19 +2267,10 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
/* Recover from controller power collapse */
if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
- u32 tmp;
-
dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
dwc3_msm_power_collapse_por(mdwc);
- /* Get initial P3 status and enable IN_P3 event */
- tmp = dwc3_msm_read_reg_field(mdwc->base,
- DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
- atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
- dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
- PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
-
mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
}
@@ -3487,6 +3491,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
if (on) {
dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
+ pm_runtime_get_sync(mdwc->dev);
mdwc->hs_phy->flags |= PHY_HOST_MODE;
if (dwc->maximum_speed == USB_SPEED_SUPER) {
mdwc->ss_phy->flags |= PHY_HOST_MODE;
@@ -3495,7 +3500,6 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
}
usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
- pm_runtime_get_sync(mdwc->dev);
dbg_event(0xFF, "StrtHost gync",
atomic_read(&mdwc->dev->power.usage_count));
if (!IS_ERR(mdwc->vbus_reg))
@@ -4014,7 +4018,39 @@ static int dwc3_msm_pm_suspend(struct device *dev)
return -EBUSY;
}
- ret = dwc3_msm_suspend(mdwc);
+ ret = dwc3_msm_suspend(mdwc, false);
+ if (!ret)
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return ret;
+}
+
+static int dwc3_msm_pm_freeze(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM freeze\n");
+ dbg_event(0xFF, "PM Freeze", 0);
+
+ flush_workqueue(mdwc->dwc3_wq);
+
+ /* Resume the core to make sure we can power collapse it */
+ ret = dwc3_msm_resume(mdwc);
+
+ /*
+ * The PHYs also need to be power collapsed, so call notify_disconnect
+ * before suspending to make sure that happens.
+ */
+ usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+ if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
+ usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+ mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+ }
+
+ mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+
+ ret = dwc3_msm_suspend(mdwc, true);
if (!ret)
atomic_set(&mdwc->pm_suspended, 1);
@@ -4043,6 +4079,35 @@ static int dwc3_msm_pm_resume(struct device *dev)
return 0;
}
+
+static int dwc3_msm_pm_restore(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM restore\n");
+ dbg_event(0xFF, "PM Restore", 0);
+
+ atomic_set(&mdwc->pm_suspended, 0);
+
+ dwc3_msm_resume(mdwc);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /* Restore PHY flags if hibernated in host mode */
+ if (mdwc->otg_state == OTG_STATE_A_HOST) {
+ mdwc->hs_phy->flags |= PHY_HOST_MODE;
+ if (mdwc->ss_phy) {
+ mdwc->ss_phy->flags |= PHY_HOST_MODE;
+ usb_phy_notify_connect(mdwc->ss_phy,
+ USB_SPEED_SUPER);
+ }
+
+ usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+ }
+
+ return 0;
+}
#endif
#ifdef CONFIG_PM
@@ -4061,7 +4126,7 @@ static int dwc3_msm_runtime_suspend(struct device *dev)
dev_dbg(dev, "DWC3-msm runtime suspend\n");
dbg_event(0xFF, "RT Sus", 0);
- return dwc3_msm_suspend(mdwc);
+ return dwc3_msm_suspend(mdwc, false);
}
static int dwc3_msm_runtime_resume(struct device *dev)
@@ -4076,8 +4141,13 @@ static int dwc3_msm_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
- .prepare = dwc3_msm_pm_prepare,
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+ .prepare = dwc3_msm_pm_prepare,
+ .suspend = dwc3_msm_pm_suspend,
+ .resume = dwc3_msm_pm_resume,
+ .freeze = dwc3_msm_pm_freeze,
+ .thaw = dwc3_msm_pm_restore,
+ .poweroff = dwc3_msm_pm_suspend,
+ .restore = dwc3_msm_pm_restore,
SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
dwc3_msm_runtime_idle)
};
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index d2c0c1a8d979..68230adf2449 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -167,7 +167,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
if (ret) {
dev_err(dev, "couldn't add resources to dwc3 device\n");
- return ret;
+ goto err;
}
pci_set_drvdata(pci, dwc3);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f108aecbfe52..f6117ac0e301 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2984,6 +2984,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
break;
}
+ dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
+
/* Enable USB2 LPM Capability */
if ((dwc->revision > DWC3_REVISION_194A)
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index a412f024d834..61dfceb336d6 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -887,6 +887,12 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
u16 w_length = le16_to_cpu(ctrl->wLength);
unsigned long flags;
+ /*
+ * If the instance has not been created, as is the case in power-off
+ * charging mode, dev will be NULL; return an error in that case.
+ */
+ if (!dev)
+ return -ENODEV;
/*
* printk(KERN_INFO "acc_ctrlrequest "
* "%02x.%02x v%04x i%04x l%u\n",
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index ec73b67096c5..7d8bfe62b148 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -591,14 +591,38 @@ static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+ if (!alt) {
+ usb_ep_disable(audio->in_ep);
+ return 0;
+ }
+
ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
- if (ret)
+ if (ret) {
+ audio->in_ep->desc = NULL;
+ pr_err("config_ep fail for audio ep ret %d\n", ret);
+ return ret;
+ }
+ ret = usb_ep_enable(audio->in_ep);
+ if (ret) {
+ audio->in_ep->desc = NULL;
+ pr_err("failed to enable audio ret %d\n", ret);
return ret;
+ }
- usb_ep_enable(audio->in_ep);
return 0;
}
+/*
+ * Because the data interface supports multiple altsettings,
+ * this audio_source function *MUST* implement a get_alt() method.
+ */
+static int audio_get_alt(struct usb_function *f, unsigned int intf)
+{
+ struct audio_dev *audio = func_to_audio(f);
+
+ return audio->in_ep->enabled ? 1 : 0;
+}
+
static void audio_disable(struct usb_function *f)
{
struct audio_dev *audio = func_to_audio(f);
@@ -862,6 +886,7 @@ static struct audio_dev _audio_dev = {
.bind = audio_bind,
.unbind = audio_unbind,
.set_alt = audio_set_alt,
+ .get_alt = audio_get_alt,
.setup = audio_setup,
.disable = audio_disable,
.free_func = audio_free_func,
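
audio_set_alt() above now treats altsetting 0 as a request to disable the IN endpoint and altsetting 1 as a request to configure and enable it, and the new audio_get_alt() reports which of the two is active from the endpoint's enabled flag. A compact standalone model of that altsetting handling, with the structures and helper names invented for illustration:

#include <stdio.h>

struct demo_ep {
	int enabled;
};

struct demo_audio {
	struct demo_ep in_ep;
};

static int audio_set_alt(struct demo_audio *audio, unsigned int alt)
{
	if (alt == 0) {			/* alt 0: interface idle, endpoint off */
		audio->in_ep.enabled = 0;
		return 0;
	}
	/* alt 1: (re)configure and enable the streaming endpoint */
	audio->in_ep.enabled = 1;
	return 0;
}

/* get_alt() must report the current altsetting when the host asks for it */
static int audio_get_alt(const struct demo_audio *audio)
{
	return audio->in_ep.enabled ? 1 : 0;
}

int main(void)
{
	struct demo_audio audio = { { 0 } };

	audio_set_alt(&audio, 1);
	printf("alt after enable:  %d\n", audio_get_alt(&audio));
	audio_set_alt(&audio, 0);
	printf("alt after disable: %d\n", audio_get_alt(&audio));
	return 0;
}
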
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 34ec15ab9010..233221fed424 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2013-2018, The Linux Foundation. All rights reserved.
* Linux Foundation chooses to take subject only to the GPLv2 license terms,
* and distributes only under these terms.
*
@@ -1251,6 +1251,7 @@ ssize_t f_cdev_write(struct file *file,
ret = -EFAULT;
} else {
req->length = xfer_size;
+ req->zero = 1;
ret = usb_ep_queue(in, req, GFP_KERNEL);
if (ret) {
pr_err("EP QUEUE failed:%d\n", ret);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 7d7197e2cfc4..9edc01692142 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -68,18 +68,27 @@ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+static LIST_HEAD(inst_list);
+
/* ffs instance status */
-static DEFINE_MUTEX(ffs_ep_lock);
-static bool ffs_inst_exist;
-static struct f_fs_opts *g_opts;
+#define INST_NAME_SIZE 16
-/* Free instance structures */
-static void ffs_inst_clean(struct f_fs_opts *opts);
-static void ffs_inst_clean_delay(void);
-static int ffs_inst_exist_check(void);
+struct ffs_inst_status {
+ char inst_name[INST_NAME_SIZE];
+ struct list_head list;
+ struct mutex ffs_lock;
+ bool inst_exist;
+ struct f_fs_opts *opts;
+ struct ffs_data *ffs_data;
+};
-/* Global ffs_data pointer */
-static struct ffs_data *g_ffs_data;
+/* Free instance structures */
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name);
+static void ffs_inst_clean_delay(const char *inst_name);
+static int ffs_inst_exist_check(const char *inst_name);
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst);
/* The function structure ***************************************************/
@@ -300,7 +309,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
ffs->state, ffs->setup_state, ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -490,7 +499,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
ffs->state, ffs->setup_state, ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -601,7 +610,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -643,7 +652,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -668,7 +677,7 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(ffs->dev_name);
if (ret < 0)
return ret;
@@ -799,6 +808,10 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ffs_log("enter: epfile name %s epfile err %d (%s)", epfile->name,
atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE");
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
+ if (ret < 0)
+ return ret;
+
smp_mb__before_atomic();
retry:
if (atomic_read(&epfile->error))
@@ -1085,7 +1098,7 @@ ffs_epfile_open(struct inode *inode, struct file *file)
ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
epfile->ffs->setup_state, epfile->ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
if (ret < 0)
return ret;
@@ -1143,16 +1156,11 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
- int ret;
ENTER();
ffs_log("enter");
- ret = ffs_inst_exist_check();
- if (ret < 0)
- return ret;
-
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1189,16 +1197,11 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
- int ret;
ENTER();
ffs_log("enter");
- ret = ffs_inst_exist_check();
- if (ret < 0)
- return ret;
-
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1275,7 +1278,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
epfile->ffs->setup_state, epfile->ffs->flags);
- ret = ffs_inst_exist_check();
+ ret = ffs_inst_exist_check(epfile->ffs->dev_name);
if (ret < 0)
return ret;
@@ -1583,6 +1586,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
int ret;
void *ffs_dev;
struct ffs_data *ffs;
+ struct ffs_inst_status *inst_status;
ENTER();
@@ -1612,6 +1616,18 @@ ffs_fs_mount(struct file_system_type *t, int flags,
ffs->private_data = ffs_dev;
data.ffs_data = ffs;
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find instance (%s)\n",
+ ffs->dev_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Store ffs to global status structure */
+ ffs_dev_lock();
+ inst_status->ffs_data = ffs;
+ ffs_dev_unlock();
+
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
if (IS_ERR(rv) && data.ffs_data) {
ffs_release_dev(data.ffs_data);
@@ -1711,6 +1727,9 @@ static void ffs_data_opened(struct ffs_data *ffs)
static void ffs_data_put(struct ffs_data *ffs)
{
+ struct ffs_inst_status *inst_status;
+ const char *dev_name;
+
ENTER();
ffs_log("enter");
@@ -1718,16 +1737,20 @@ static void ffs_data_put(struct ffs_data *ffs)
smp_mb__before_atomic();
if (unlikely(atomic_dec_and_test(&ffs->ref))) {
pr_info("%s(): freeing\n", __func__);
- /* Clear g_ffs_data */
- ffs_dev_lock();
- g_ffs_data = NULL;
- ffs_dev_unlock();
+ /* Clear ffs from global structure */
+ inst_status = name_to_inst_status(ffs->dev_name, false);
+ if (!IS_ERR(inst_status)) {
+ ffs_dev_lock();
+ inst_status->ffs_data = NULL;
+ ffs_dev_unlock();
+ }
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
waitqueue_active(&ffs->ep0req_completion.wait));
- kfree(ffs->dev_name);
+ dev_name = ffs->dev_name;
kfree(ffs);
- ffs_inst_clean_delay();
+ ffs_inst_clean_delay(dev_name);
+ kfree(dev_name);
}
ffs_log("exit");
@@ -1792,11 +1815,6 @@ static struct ffs_data *ffs_data_new(void)
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
- /* Store ffs to g_ffs_data */
- ffs_dev_lock();
- g_ffs_data = ffs;
- ffs_dev_unlock();
-
ffs_log("exit");
return ffs;
@@ -3226,8 +3244,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
struct ffs_data *ffs = func->ffs;
const int full = !!func->ffs->fs_descs_count;
- const int high = func->ffs->hs_descs_count;
- const int super = func->ffs->ss_descs_count;
+ const int high = !!func->ffs->hs_descs_count;
+ const int super = !!func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
struct ffs_ep *eps_ptr;
@@ -3684,79 +3702,146 @@ static struct config_item_type ffs_func_type = {
/* Function registration interface ******************************************/
-static int ffs_inst_exist_check(void)
+static struct ffs_inst_status *name_to_inst_status(
+ const char *inst_name, bool create_inst)
+{
+ struct ffs_inst_status *inst_status;
+
+ list_for_each_entry(inst_status, &inst_list, list) {
+ if (!strncasecmp(inst_status->inst_name,
+ inst_name, strlen(inst_name)))
+ return inst_status;
+ }
+
+ if (!create_inst)
+ return ERR_PTR(-ENODEV);
+
+ inst_status = kzalloc(sizeof(struct ffs_inst_status),
+ GFP_KERNEL);
+ if (!inst_status)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&inst_status->ffs_lock);
+ snprintf(inst_status->inst_name, INST_NAME_SIZE, "%s", inst_name);
+ list_add_tail(&inst_status->list, &inst_list);
+
+ return inst_status;
+}
+
+static int ffs_inst_exist_check(const char *inst_name)
{
- mutex_lock(&ffs_ep_lock);
+ struct ffs_inst_status *inst_status;
- if (unlikely(ffs_inst_exist == false)) {
- mutex_unlock(&ffs_ep_lock);
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
pr_err_ratelimited(
- "%s: f_fs instance freed already.\n",
- __func__);
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return -ENODEV;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ mutex_unlock(&inst_status->ffs_lock);
+ pr_err_ratelimited(
+ "%s: f_fs instance (%s) has been freed already.\n",
+ __func__, inst_name);
return -ENODEV;
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
return 0;
}
-static void ffs_inst_clean(struct f_fs_opts *opts)
+static void ffs_inst_clean(struct f_fs_opts *opts,
+ const char *inst_name)
{
- g_opts = NULL;
+ struct ffs_inst_status *inst_status;
+
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find instance (%s)\n",
+ __func__, inst_name);
+ return;
+ }
+
+ inst_status->opts = NULL;
+
ffs_dev_lock();
_ffs_free_dev(opts->dev);
ffs_dev_unlock();
kfree(opts);
}
-static void ffs_inst_clean_delay(void)
+static void ffs_inst_clean_delay(const char *inst_name)
{
- mutex_lock(&ffs_ep_lock);
+ struct ffs_inst_status *inst_status;
- if (unlikely(ffs_inst_exist == false)) {
- if (g_opts) {
- ffs_inst_clean(g_opts);
+ inst_status = name_to_inst_status(inst_name, false);
+ if (IS_ERR(inst_status)) {
+ pr_err_ratelimited(
+ "%s: failed to find (%s) instance\n",
+ __func__, inst_name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+
+ if (unlikely(inst_status->inst_exist == false)) {
+ if (inst_status->opts) {
+ ffs_inst_clean(inst_status->opts, inst_name);
pr_err_ratelimited("%s: Delayed free memory\n",
__func__);
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
return;
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
}
static void ffs_free_inst(struct usb_function_instance *f)
{
struct f_fs_opts *opts;
+ struct ffs_inst_status *inst_status;
opts = to_f_fs_opts(f);
- mutex_lock(&ffs_ep_lock);
+ inst_status = name_to_inst_status(opts->dev->name, false);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to find (%s) instance\n",
+ opts->dev->name);
+ return;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
if (opts->dev->ffs_data
&& atomic_read(&opts->dev->ffs_data->opened)) {
- ffs_inst_exist = false;
- mutex_unlock(&ffs_ep_lock);
- ffs_log("%s: Dev is open, free mem when dev close\n",
- __func__);
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
+ ffs_log("Dev is open, free mem when dev (%s) close\n",
+ opts->dev->name);
return;
}
- ffs_inst_clean(opts);
- ffs_inst_exist = false;
- g_opts = NULL;
- mutex_unlock(&ffs_ep_lock);
+ ffs_inst_clean(opts, opts->dev->name);
+ inst_status->inst_exist = false;
+ mutex_unlock(&inst_status->ffs_lock);
}
#define MAX_INST_NAME_LEN 40
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
- struct f_fs_opts *opts;
+ struct f_fs_opts *opts, *opts_prev;
+ struct ffs_data *ffs_data_tmp;
char *ptr;
const char *tmp;
int name_len, ret;
+ struct ffs_inst_status *inst_status;
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
@@ -3766,13 +3851,22 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
if (!ptr)
return -ENOMEM;
- mutex_lock(&ffs_ep_lock);
- if (g_opts) {
- mutex_unlock(&ffs_ep_lock);
- ffs_log("%s: prev inst do not freed yet\n", __func__);
+ inst_status = name_to_inst_status(ptr, true);
+ if (IS_ERR(inst_status)) {
+ ffs_log("failed to create status struct for (%s) instance\n",
+ ptr);
+ return -EINVAL;
+ }
+
+ mutex_lock(&inst_status->ffs_lock);
+ opts_prev = inst_status->opts;
+ if (opts_prev) {
+ mutex_unlock(&inst_status->ffs_lock);
+ ffs_log("instance (%s): prev inst do not freed yet\n",
+ inst_status->inst_name);
return -EBUSY;
}
- mutex_unlock(&ffs_ep_lock);
+ mutex_unlock(&inst_status->ffs_lock);
opts = to_f_fs_opts(fi);
tmp = NULL;
@@ -3794,8 +3888,9 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
* ffs_private_data also need to update new allocated opts->dev
* address.
*/
- if (g_ffs_data)
- opts->dev->ffs_data = g_ffs_data;
+ ffs_data_tmp = inst_status->ffs_data;
+ if (ffs_data_tmp)
+ opts->dev->ffs_data = ffs_data_tmp;
if (opts->dev->ffs_data)
opts->dev->ffs_data->private_data = opts->dev;
@@ -3804,10 +3899,10 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
kfree(tmp);
- mutex_lock(&ffs_ep_lock);
- ffs_inst_exist = true;
- g_opts = opts;
- mutex_unlock(&ffs_ep_lock);
+ mutex_lock(&inst_status->ffs_lock);
+ inst_status->inst_exist = true;
+ inst_status->opts = opts;
+ mutex_unlock(&inst_status->ffs_lock);
return 0;
}
@@ -4212,6 +4307,20 @@ module_init(ffs_init);
static void __exit ffs_exit(void)
{
+ struct ffs_inst_status *inst_status, *inst_status_tmp = NULL;
+
+ list_for_each_entry(inst_status, &inst_list, list) {
+ if (inst_status_tmp) {
+ list_del(&inst_status_tmp->list);
+ kfree(inst_status_tmp);
+ }
+ inst_status_tmp = inst_status;
+ }
+ if (inst_status_tmp) {
+ list_del(&inst_status_tmp->list);
+ kfree(inst_status_tmp);
+ }
+
if (ffs_ipc_log) {
ipc_log_context_destroy(ffs_ipc_log);
ffs_ipc_log = NULL;
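
The f_fs rework above replaces the single global ffs_inst_exist/g_opts/g_ffs_data state with a list of per-name ffs_inst_status entries, each carrying its own mutex, so multiple FunctionFS instances can be created and torn down independently. A simplified userspace sketch of such a name-keyed registry follows, using a fixed-size array instead of a kernel linked list; the sizes and names are illustrative only:

#include <stdio.h>
#include <string.h>
#include <pthread.h>

#define INST_NAME_SIZE 16
#define MAX_INSTANCES  8

struct inst_status {
	char name[INST_NAME_SIZE];
	int in_use;
	int exists;			/* mirrors inst_exist in the patch */
	pthread_mutex_t lock;
};

static struct inst_status registry[MAX_INSTANCES];

/* Find an entry by name; optionally create it if it is missing. */
static struct inst_status *name_to_status(const char *name, int create)
{
	int i;

	for (i = 0; i < MAX_INSTANCES; i++)
		if (registry[i].in_use && !strcmp(registry[i].name, name))
			return &registry[i];

	if (!create)
		return NULL;

	for (i = 0; i < MAX_INSTANCES; i++) {
		if (!registry[i].in_use) {
			registry[i].in_use = 1;
			snprintf(registry[i].name, INST_NAME_SIZE, "%s", name);
			pthread_mutex_init(&registry[i].lock, NULL);
			return &registry[i];
		}
	}
	return NULL;
}

int main(void)
{
	struct inst_status *st = name_to_status("adb", 1);

	pthread_mutex_lock(&st->lock);
	st->exists = 1;			/* per-instance state, guarded by its own lock */
	pthread_mutex_unlock(&st->lock);

	printf("lookup adb: %s\n", name_to_status("adb", 0) ? "found" : "missing");
	printf("lookup mtp: %s\n", name_to_status("mtp", 0) ? "found" : "missing");
	return 0;
}
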
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 67f7e75a9219..ff61879767b3 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -49,6 +49,7 @@ static struct gsi_inst_status {
/* Deregister misc device and free instance structures */
static void gsi_inst_clean(struct gsi_opts *opts);
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port);
static void ipa_disconnect_handler(struct gsi_data_port *d_port);
static int gsi_ctrl_send_notification(struct f_gsi *gsi);
static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
@@ -501,14 +502,11 @@ static void ipa_disconnect_handler(struct gsi_data_port *d_port)
*/
usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
GSI_EP_OP_SET_CLR_BLOCK_DBL);
- gsi->in_ep_desc_backup = gsi->d_port.in_ep->desc;
usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE);
}
- if (gsi->d_port.out_ep) {
- gsi->out_ep_desc_backup = gsi->d_port.out_ep->desc;
+ if (gsi->d_port.out_ep)
usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE);
- }
gsi->d_port.net_ready_trigger = false;
}
@@ -617,6 +615,7 @@ static void ipa_work_handler(struct work_struct *w)
struct usb_gadget *gadget = gsi->gadget;
struct device *dev;
struct device *gad_dev;
+ bool block_db;
event = read_event(d_port);
@@ -679,28 +678,6 @@ static void ipa_work_handler(struct work_struct *w)
break;
}
- /*
- * Update desc and reconfigure USB GSI OUT and IN
- * endpoint for RNDIS Adaptor enable case.
- */
- if (d_port->out_ep && !d_port->out_ep->desc &&
- gsi->out_ep_desc_backup) {
- d_port->out_ep->desc = gsi->out_ep_desc_backup;
- d_port->out_ep->ep_intr_num = 1;
- log_event_dbg("%s: OUT ep_op_config", __func__);
- usb_gsi_ep_op(d_port->out_ep,
- &d_port->out_request, GSI_EP_OP_CONFIG);
- }
-
- if (d_port->in_ep && !d_port->in_ep->desc &&
- gsi->in_ep_desc_backup) {
- d_port->in_ep->desc = gsi->in_ep_desc_backup;
- d_port->in_ep->ep_intr_num = 2;
- log_event_dbg("%s: IN ep_op_config", __func__);
- usb_gsi_ep_op(d_port->in_ep,
- &d_port->in_request, GSI_EP_OP_CONFIG);
- }
-
ipa_connect_channels(d_port);
ipa_data_path_enable(d_port);
d_port->sm_state = STATE_CONNECTED;
@@ -762,7 +739,15 @@ static void ipa_work_handler(struct work_struct *w)
if (event == EVT_HOST_NRDY) {
log_event_dbg("%s: ST_CON_HOST_NRDY\n",
__func__);
- ipa_disconnect_handler(d_port);
+ block_db = true;
+ /* stop USB from ringing the doorbell to GSI (OUT_EP) */
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ gsi_rndis_ipa_reset_trigger(d_port);
+ usb_gsi_ep_op(d_port->in_ep, NULL,
+ GSI_EP_OP_ENDXFER);
+ usb_gsi_ep_op(d_port->out_ep, NULL,
+ GSI_EP_OP_ENDXFER);
}
ipa_disconnect_work_handler(d_port);
@@ -1471,6 +1456,27 @@ static void gsi_rndis_open(struct f_gsi *rndis)
rndis_signal_connect(rndis->params);
}
+static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port)
+{
+ struct f_gsi *rndis = d_port_to_gsi(d_port);
+ unsigned long flags;
+
+ if (!rndis) {
+ log_event_err("%s: gsi prot ctx is NULL", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&rndis->d_port.lock, flags);
+ rndis->d_port.net_ready_trigger = false;
+ spin_unlock_irqrestore(&rndis->d_port.lock, flags);
+}
+
void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param)
{
struct f_gsi *rndis = param->v;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index ee579ba2b59e..a5dae5bb62ab 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -223,6 +223,13 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
/* pick the first one */
list = list_first_entry(&hidg->completed_out_req,
struct f_hidg_req_list, list);
+
+ /*
+ * Remove this from the list to protect it from being freed
+ * while the host disables our function
+ */
+ list_del(&list->list);
+
req = list->req;
count = min_t(unsigned int, count, req->actual - list->pos);
spin_unlock_irqrestore(&hidg->spinlock, flags);
@@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
* call, taking into account its current read position.
*/
if (list->pos == req->actual) {
- spin_lock_irqsave(&hidg->spinlock, flags);
- list_del(&list->list);
kfree(list);
- spin_unlock_irqrestore(&hidg->spinlock, flags);
req->length = hidg->report_length;
ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
- if (ret < 0)
+ if (ret < 0) {
+ free_ep_req(hidg->out_ep, req);
return ret;
+ }
+ } else {
+ spin_lock_irqsave(&hidg->spinlock, flags);
+ list_add(&list->list, &hidg->completed_out_req);
+ spin_unlock_irqrestore(&hidg->spinlock, flags);
+
+ wake_up(&hidg->read_queue);
}
return count;
@@ -490,14 +502,18 @@ static void hidg_disable(struct usb_function *f)
{
struct f_hidg *hidg = func_to_hidg(f);
struct f_hidg_req_list *list, *next;
+ unsigned long flags;
usb_ep_disable(hidg->in_ep);
usb_ep_disable(hidg->out_ep);
+ spin_lock_irqsave(&hidg->spinlock, flags);
list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
+ free_ep_req(hidg->out_ep, list->req);
list_del(&list->list);
kfree(list);
}
+ spin_unlock_irqrestore(&hidg->spinlock, flags);
}
static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
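
The f_hid change above detaches a completed request from the list while the spinlock is held, copies from it with the lock dropped, and only re-attaches it when the reader consumed part of the report; hidg_disable() can then free whatever remains on the list without racing a concurrent reader. A simplified userspace model of that detach/re-attach pattern, using a mutex and a singly linked list with names invented for illustration:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <pthread.h>

struct report {
	struct report *next;
	size_t len, pos;
	char data[64];
};

static struct report *completed;		/* list of finished OUT requests */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Copy up to 'count' bytes from the oldest report into 'buf'. */
static size_t read_report(char *buf, size_t count)
{
	struct report *rep;
	size_t n;

	pthread_mutex_lock(&list_lock);
	rep = completed;
	if (!rep) {
		pthread_mutex_unlock(&list_lock);
		return 0;
	}
	completed = rep->next;		/* detach while holding the lock */
	pthread_mutex_unlock(&list_lock);

	n = rep->len - rep->pos;
	if (n > count)
		n = count;
	memcpy(buf, rep->data + rep->pos, n);
	rep->pos += n;

	if (rep->pos == rep->len) {
		free(rep);		/* fully consumed: release it */
	} else {
		pthread_mutex_lock(&list_lock);
		rep->next = completed;	/* partially consumed: put it back */
		completed = rep;
		pthread_mutex_unlock(&list_lock);
	}
	return n;
}

int main(void)
{
	struct report *rep = calloc(1, sizeof(*rep));
	char buf[8];

	rep->len = 10;
	memcpy(rep->data, "0123456789", 10);
	completed = rep;

	printf("first read:  %zu bytes\n", read_report(buf, sizeof(buf)));
	printf("second read: %zu bytes\n", read_report(buf, sizeof(buf)));
	return 0;
}
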
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 79f554f1fb23..1fcdbdc35cd1 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -210,12 +210,6 @@ static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
return alloc_ep_req(ep, length, length);
}
-static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
-{
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
static const uint8_t f_midi_cin_length[] = {
0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
};
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index afb3d3a03253..2c416472e279 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -1,7 +1,7 @@
/*
* f_qdss.c -- QDSS function Driver
*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1162,7 +1162,7 @@ static struct usb_function *qdss_alloc(struct usb_function_instance *fi)
return &usb_qdss->port.function;
}
-DECLARE_USB_FUNCTION_INIT(qdss, qdss_alloc_inst, qdss_alloc);
+DECLARE_USB_FUNCTION(qdss, qdss_alloc_inst, qdss_alloc);
static int __init usb_qdss_init(void)
{
int ret;
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9f3ced62d916..67b243989938 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -303,12 +303,6 @@ static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
return alloc_ep_req(ep, len, ss->buflen);
}
-void free_ep_req(struct usb_ep *ep, struct usb_request *req)
-{
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
{
int value;
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 15f180904f8a..5ed90b437f18 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -59,7 +59,6 @@ void lb_modexit(void);
int lb_modinit(void);
/* common utilities */
-void free_ep_req(struct usb_ep *ep, struct usb_request *req);
void disable_endpoints(struct usb_composite_dev *cdev,
struct usb_ep *in, struct usb_ep *out,
struct usb_ep *iso_in, struct usb_ep *iso_out);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index ad8c9b05572d..01656f1c6d65 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2202,7 +2202,7 @@ static struct configfs_item_operations uvc_item_ops = {
.release = uvc_attr_release,
};
-#define UVCG_OPTS_ATTR(cname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_OPTS_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
static ssize_t f_uvc_opts_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -2245,16 +2245,16 @@ end: \
return ret; \
} \
\
-UVC_ATTR(f_uvc_opts_, cname, aname)
+UVC_ATTR(f_uvc_opts_, cname, cname)
#define identity_conv(x) (x)
-UVCG_OPTS_ATTR(streaming_interval, identity_conv, kstrtou8, u8, identity_conv,
- 16);
-UVCG_OPTS_ATTR(streaming_maxpacket, le16_to_cpu, kstrtou16, u16, le16_to_cpu,
- 3072);
-UVCG_OPTS_ATTR(streaming_maxburst, identity_conv, kstrtou8, u8, identity_conv,
- 15);
+UVCG_OPTS_ATTR(streaming_interval, streaming_interval, identity_conv,
+ kstrtou8, u8, identity_conv, 16);
+UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, le16_to_cpu,
+ kstrtou16, u16, le16_to_cpu, 3072);
+UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, identity_conv,
+ kstrtou8, u8, identity_conv, 15);
#undef identity_conv
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index c6276f0268ae..907f8144813c 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -11,16 +11,18 @@
* published by the Free Software Foundation.
*/
-#include <linux/usb/gadget.h>
#include "u_f.h"
+#include <linux/usb/ch9.h>
-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len)
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len)
{
struct usb_request *req;
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req) {
req->length = len ?: default_len;
+ if (usb_endpoint_dir_out(ep->desc))
+ req->length = usb_ep_align(ep, req->length);
req->buf = kmalloc(req->length, GFP_ATOMIC);
if (!req->buf) {
usb_ep_free_request(ep, req);
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 1d5f0eb68552..69a1d10df04f 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -16,6 +16,8 @@
#ifndef __U_F_H__
#define __U_F_H__
+#include <linux/usb/gadget.h>
+
/* Variable Length Array Macros **********************************************/
#define vla_group(groupname) size_t groupname##__next = 0
#define vla_group_size(groupname) groupname##__next
@@ -45,8 +47,26 @@
struct usb_ep;
struct usb_request;
-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len);
-
-#endif /* __U_F_H__ */
+/**
+ * alloc_ep_req - returns a usb_request allocated by the gadget driver and
+ * allocates the request's buffer.
+ *
+ * @ep: the endpoint to allocate a usb_request
+ * @len: suggested size of the usb_request's buffer
+ * @default_len: used if @len is not provided, i.e. is 0
+ *
+ * If @ep's direction is OUT, @len will be aligned to the endpoint's
+ * wMaxPacketSize. To avoid memory leaks or drops, *always* use the
+ * usb_request's length (req->length) to refer to the allocated buffer size.
+ * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
+ */
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len);
+/* Frees a usb_request previously allocated by alloc_ep_req() */
+static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+}
+#endif /* __U_F_H__ */
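
alloc_ep_req() now rounds the buffer length of OUT requests up to the endpoint's wMaxPacketSize, which is why the new kernel-doc tells callers to trust req->length rather than the length they asked for. A tiny standalone illustration of that round-up; ep_align() here is a stand-in for the kernel's usb_ep_align(), not the real helper:

#include <stdio.h>

/* Round len up to the next multiple of maxpacket, as done for OUT
 * endpoints (stand-in helper for illustration only). */
static unsigned int ep_align(unsigned int len, unsigned int maxpacket)
{
	return ((len + maxpacket - 1) / maxpacket) * maxpacket;
}

int main(void)
{
	unsigned int requested = 100, maxpacket = 64;
	unsigned int actual = ep_align(requested, maxpacket);

	/* callers must use the aligned size, or short packets may be dropped */
	printf("requested %u bytes -> allocated %u bytes\n", requested, actual);
	return 0;
}
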
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index ccb9c213cc9f..e9bd8d4abca0 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev)
bdc->dev = dev;
dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
- temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
if ((temp & BDC_P64) &&
!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
dev_dbg(bdc->dev, "Using 64-bit address\n");
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
index 02968842b359..708e36f530d8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (ret) {
dev_err(&pci->dev,
"couldn't add resources to bdc device\n");
+ platform_device_put(bdc);
return ret;
}
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 8080a11947b7..eb876ed96861 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2105,16 +2105,13 @@ static int dummy_hub_control(
}
break;
case USB_PORT_FEAT_POWER:
- if (hcd->speed == HCD_USB3) {
- if (dum_hcd->port_status & USB_PORT_STAT_POWER)
- dev_dbg(dummy_dev(dum_hcd),
- "power-off\n");
- } else
- if (dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER)
- dev_dbg(dummy_dev(dum_hcd),
- "power-off\n");
- /* FALLS THROUGH */
+ dev_dbg(dummy_dev(dum_hcd), "power-off\n");
+ if (hcd->speed == HCD_USB3)
+ dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
+ else
+ dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
+ set_link_state(dum_hcd);
+ break;
default:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
@@ -2285,14 +2282,13 @@ static int dummy_hub_control(
if ((dum_hcd->port_status &
USB_SS_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
- set_link_state(dum_hcd);
}
} else
if ((dum_hcd->port_status &
USB_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
- set_link_state(dum_hcd);
}
+ set_link_state(dum_hcd);
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 3bb08870148f..95e72d75e0a0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -220,6 +220,8 @@ config USB_EHCI_TEGRA
depends on ARCH_TEGRA
select USB_EHCI_ROOT_HUB_TT
select USB_PHY
+ select USB_ULPI
+ select USB_ULPI_VIEWPORT
help
This driver enables support for the internal USB Host Controllers
found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 4365dc36be8d..48200a89f7aa 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1018,6 +1018,8 @@ skip_ed:
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
modified = 0;
/* unlink urbs as requested, but rescan the list after
@@ -1076,21 +1078,22 @@ rescan_this:
goto rescan_this;
/*
- * If no TDs are queued, take ED off the ed_rm_list.
+ * If no TDs are queued, ED is now idle.
* Otherwise, if the HC is running, reschedule.
- * If not, leave it on the list for further dequeues.
+ * If the HC isn't running, add ED back to the
+ * start of the list for later processing.
*/
if (list_empty(&ed->td_list)) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed->state = ED_IDLE;
list_del(&ed->in_use_list);
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed_schedule(ohci, ed);
} else {
- last = &ed->ed_next;
+ ed->ed_next = ohci->ed_rm_list;
+ ohci->ed_rm_list = ed;
+ /* Don't loop on the same ED */
+ if (last == &ohci->ed_rm_list)
+ last = &ed->ed_next;
}
if (modified)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c9596f1a7d26..efe1924e0875 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -233,10 +233,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
- if (device_property_read_u32(pdev->dev.parent, "usb-core-id",
- &xhci->core_id))
- xhci->core_id = -EINVAL;
-
hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
@@ -353,7 +349,7 @@ static int xhci_plat_resume(struct device *dev)
dev_dbg(dev, "xhci-plat PM resume\n");
- return xhci_resume(xhci, false);
+ return (!hcd_to_bus(hcd)->skip_resume) ? xhci_resume(xhci, false) : 0;
}
#endif
@@ -374,6 +370,39 @@ static int xhci_plat_runtime_idle(struct device *dev)
return -EBUSY;
}
+static int xhci_plat_pm_freeze(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat freeze\n");
+
+ return xhci_suspend(xhci, false);
+}
+
+static int xhci_plat_pm_restore(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat restore\n");
+
+ ret = xhci_resume(xhci, true);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+
+ return ret;
+}
+
static int xhci_plat_runtime_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -405,7 +434,11 @@ static int xhci_plat_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops xhci_plat_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+ .suspend = xhci_plat_suspend,
+ .resume = xhci_plat_resume,
+ .freeze = xhci_plat_pm_freeze,
+ .restore = xhci_plat_pm_restore,
+ .thaw = xhci_plat_pm_restore,
SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
xhci_plat_runtime_idle)
};
@@ -437,7 +470,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
- .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7e76573c8236..4954e22a421b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4999,6 +4999,17 @@ dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
return 0;
}
+static dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
+ return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
+
+ return 0;
+}
+
dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
struct usb_device *udev, struct usb_host_endpoint *ep)
{
@@ -5024,13 +5035,6 @@ dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
return 0;
}
-int xhci_get_core_id(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
- return xhci->core_id;
-}
-
static const struct hc_driver xhci_hc_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
@@ -5094,7 +5098,7 @@ static const struct hc_driver xhci_hc_driver = {
.sec_event_ring_cleanup = xhci_sec_event_ring_cleanup,
.get_sec_event_ring_dma_addr = xhci_get_sec_event_ring_dma_addr,
.get_xfer_ring_dma_addr = xhci_get_xfer_ring_dma_addr,
- .get_core_id = xhci_get_core_id,
+ .get_dcba_dma_addr = xhci_get_dcba_dma_addr,
};
void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ac637dc6e3cc..c665806983be 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1519,8 +1519,6 @@ struct xhci_hcd {
/* secondary interrupter */
struct xhci_intr_reg __iomem **sec_ir_set;
- int core_id;
-
/* Cached register copies of read-only HC data */
__u32 hcs_params1;
__u32 hcs_params2;
@@ -1948,7 +1946,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
-int xhci_get_core_id(struct usb_hcd *hcd);
#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
index c31b4a33e6bb..0efcd485c02a 100644
--- a/drivers/usb/misc/ehset.c
+++ b/drivers/usb/misc/ehset.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,69 +26,89 @@
#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
-static int ehset_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static u8 numPorts;
+
+static int ehset_get_port_num(struct device *dev, const char *buf,
+ unsigned long *val)
+{
+ int ret;
+
+ ret = kstrtoul(buf, 10, val);
+ if (ret < 0) {
+ dev_err(dev, "couldn't parse string %d\n", ret);
+ return ret;
+ }
+
+ if (!*val || *val > numPorts) {
+ dev_err(dev, "Invalid port num entered\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ehset_clear_port_feature(struct usb_device *udev, int feature,
+ int port1)
+{
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1,
+ NULL, 0, 1000);
+}
+
+static int ehset_set_port_feature(struct usb_device *udev, int feature,
+ int port1, int timeout)
+{
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
+ NULL, 0, timeout);
+}
+
+static int ehset_set_testmode(struct device *dev, struct usb_device *child_udev,
+ struct usb_device *hub_udev, int test_id, int port)
{
- int ret = -EINVAL;
- struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_device *hub_udev = dev->parent;
struct usb_device_descriptor *buf;
- u8 portnum = dev->portnum;
- u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);
+ int ret = -EINVAL;
- switch (test_pid) {
+ switch (test_id) {
case TEST_SE0_NAK_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_SE0_NAK << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_SE0_NAK << 8) | port, 1000);
break;
case TEST_J_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_J << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_J << 8) | port, 1000);
break;
case TEST_K_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_K << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_K << 8) | port, 1000);
break;
case TEST_PACKET_PID:
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (TEST_PACKET << 8) | portnum,
- NULL, 0, 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (TEST_PACKET << 8) | port, 1000);
break;
case TEST_HS_HOST_PORT_SUSPEND_RESUME:
/* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
msleep(15 * 1000);
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_SUSPEND, portnum,
- NULL, 0, 1000);
- if (ret < 0)
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_SUSPEND,
+ port, 1000);
+ if (ret)
break;
msleep(15 * 1000);
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_SUSPEND, portnum,
- NULL, 0, 1000);
+ ret = ehset_clear_port_feature(hub_udev, USB_PORT_FEAT_SUSPEND,
+ port);
break;
case TEST_SINGLE_STEP_GET_DEV_DESC:
/* Test: wait for 15secs -> GetDescriptor request */
msleep(15 * 1000);
buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ break;
+ }
- ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ ret = usb_control_msg(child_udev,
+ usb_rcvctrlpipe(child_udev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, USB_DT_DEVICE_SIZE,
@@ -103,28 +123,212 @@ static int ehset_probe(struct usb_interface *intf,
* SetPortFeature handling can only be done inside the HCD's
* hub_control callback function.
*/
- if (hub_udev != dev->bus->root_hub) {
- dev_err(&intf->dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
+ if (hub_udev != child_udev->bus->root_hub) {
+ dev_err(dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
break;
}
- ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
- USB_REQ_SET_FEATURE, USB_RT_PORT,
- USB_PORT_FEAT_TEST,
- (6 << 8) | portnum,
- NULL, 0, 60 * 1000);
+ ret = ehset_set_port_feature(hub_udev, USB_PORT_FEAT_TEST,
+ (6 << 8) | port, 60 * 1000);
break;
default:
- dev_err(&intf->dev, "%s: unsupported PID: 0x%x\n",
- __func__, test_pid);
+ dev_err(dev, "%s: unsupported test ID: 0x%x\n",
+ __func__, test_id);
+ }
+
+ return ret;
+}
+
+static ssize_t test_se0_nak_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_SE0_NAK_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while SE0_NAK test\n", ret);
+ return ret;
}
+ return count;
+}
+static DEVICE_ATTR_WO(test_se0_nak_portnum);
+
+static ssize_t test_j_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_J_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while J state test\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_j_portnum);
+
+static ssize_t test_k_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_K_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while K state test\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_k_portnum);
+
+static ssize_t test_packet_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev, TEST_PACKET_PID, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while sending test packets\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_packet_portnum);
+
+static ssize_t test_port_susp_resume_portnum_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long portnum;
+ int ret;
+
+ ret = ehset_get_port_num(dev, buf, &portnum);
+ if (ret)
+ return ret;
+
+ usb_lock_device(udev);
+ ret = ehset_set_testmode(dev, NULL, udev,
+ TEST_HS_HOST_PORT_SUSPEND_RESUME, portnum);
+ usb_unlock_device(udev);
+ if (ret) {
+ dev_err(dev, "Error %d while port suspend resume test\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(test_port_susp_resume_portnum);
+
+static struct attribute *ehset_attributes[] = {
+ &dev_attr_test_se0_nak_portnum.attr,
+ &dev_attr_test_j_portnum.attr,
+ &dev_attr_test_k_portnum.attr,
+ &dev_attr_test_packet_portnum.attr,
+ &dev_attr_test_port_susp_resume_portnum.attr,
+ NULL
+};
+
+static const struct attribute_group ehset_attr_group = {
+ .attrs = ehset_attributes,
+};
+
+static int ehset_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret = -EINVAL;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_device *hub_udev = dev->parent;
+ u8 portnum = dev->portnum;
+ u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);
+
+ /*
+ * If an external hub does not support the EHSET test fixture, the user
+ * can forcibly unbind the external hub from the hub driver (to which
+ * external hubs are bound by default) and bind it to this driver, so
+ * that test signals can be sent on any downstream port of the hub.
+ */
+ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB) {
+ struct usb_hub_descriptor *descriptor;
+
+ descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
+ if (!descriptor)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
+ USB_DT_HUB << 8, 0, descriptor,
+ USB_DT_HUB_NONVAR_SIZE, USB_CTRL_GET_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&intf->dev, "%s: Failed to get hub desc %d\n",
+ __func__, ret);
+ kfree(descriptor);
+ return ret;
+ }
+
+ numPorts = descriptor->bNbrPorts;
+ ret = sysfs_create_group(&intf->dev.kobj, &ehset_attr_group);
+ if (ret < 0)
+ dev_err(&intf->dev, "%s: Failed to create sysfs nodes %d\n",
+ __func__, ret);
+
+ kfree(descriptor);
+ return ret;
+ }
+
+ ret = ehset_set_testmode(&intf->dev, dev, hub_udev, test_pid, portnum);
+
return (ret < 0) ? ret : 0;
}
static void ehset_disconnect(struct usb_interface *intf)
{
+ struct usb_device *dev = interface_to_usbdev(intf);
+
+ numPorts = 0;
+ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ sysfs_remove_group(&intf->dev.kobj, &ehset_attr_group);
}
static const struct usb_device_id ehset_id_table[] = {
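The new sysfs attributes above provide a per-port trigger for each EHSET pattern once an external hub has been bound to this driver. A minimal user-space sketch of how they might be exercised follows; the sysfs path (interface "2-1:1.0") and the decimal port-number format accepted by ehset_get_port_num() are assumptions for illustration only.

/* Hypothetical example: start TEST_J on downstream port 2 of a bound hub. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/bus/usb/devices/2-1:1.0/test_j_portnum";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "2", 1) != 1)	/* downstream port number */
		perror("write");
	close(fd);
	return 0;
}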
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index cce22ff1c2eb..e9113238d9e3 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -46,6 +46,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */
#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */
#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */
#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */
@@ -88,6 +91,9 @@ static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 060d78d53118..d0c7f4949f6f 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -82,6 +82,8 @@ struct mon_reader_text {
wait_queue_head_t wait;
int printf_size;
+ size_t printf_offset;
+ size_t printf_togo;
char *printf_buf;
struct mutex printf_lock;
@@ -373,73 +375,103 @@ err_alloc:
return rc;
}
-/*
- * For simplicity, we read one record in one system call and throw out
- * what does not fit. This means that the following does not work:
- * dd if=/dbg/usbmon/0t bs=10
- * Also, we do not allow seeks and do not bother advancing the offset.
- */
+static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
+ char __user * const buf, const size_t nbytes)
+{
+ const size_t togo = min(nbytes, rp->printf_togo);
+
+ if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
+ return -EFAULT;
+ rp->printf_togo -= togo;
+ rp->printf_offset += togo;
+ return togo;
+}
+
+/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_t(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
+ ssize_t ret;
- if (IS_ERR(ep = mon_text_read_wait(rp, file)))
- return PTR_ERR(ep);
mutex_lock(&rp->printf_lock);
- ptr.cnt = 0;
- ptr.pbuf = rp->printf_buf;
- ptr.limit = rp->printf_size;
-
- mon_text_read_head_t(rp, &ptr, ep);
- mon_text_read_statset(rp, &ptr, ep);
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
- " %d", ep->length);
- mon_text_read_data(rp, &ptr, ep);
-
- if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
- ptr.cnt = -EFAULT;
+
+ if (rp->printf_togo == 0) {
+
+ ep = mon_text_read_wait(rp, file);
+ if (IS_ERR(ep)) {
+ mutex_unlock(&rp->printf_lock);
+ return PTR_ERR(ep);
+ }
+ ptr.cnt = 0;
+ ptr.pbuf = rp->printf_buf;
+ ptr.limit = rp->printf_size;
+
+ mon_text_read_head_t(rp, &ptr, ep);
+ mon_text_read_statset(rp, &ptr, ep);
+ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ " %d", ep->length);
+ mon_text_read_data(rp, &ptr, ep);
+
+ rp->printf_togo = ptr.cnt;
+ rp->printf_offset = 0;
+
+ kmem_cache_free(rp->e_slab, ep);
+ }
+
+ ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
- kmem_cache_free(rp->e_slab, ep);
- return ptr.cnt;
+ return ret;
}
+/* ppos is not advanced since the llseek operation is not permitted. */
static ssize_t mon_text_read_u(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct mon_reader_text *rp = file->private_data;
struct mon_event_text *ep;
struct mon_text_ptr ptr;
+ ssize_t ret;
- if (IS_ERR(ep = mon_text_read_wait(rp, file)))
- return PTR_ERR(ep);
mutex_lock(&rp->printf_lock);
- ptr.cnt = 0;
- ptr.pbuf = rp->printf_buf;
- ptr.limit = rp->printf_size;
- mon_text_read_head_u(rp, &ptr, ep);
- if (ep->type == 'E') {
- mon_text_read_statset(rp, &ptr, ep);
- } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
- mon_text_read_isostat(rp, &ptr, ep);
- mon_text_read_isodesc(rp, &ptr, ep);
- } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
- mon_text_read_intstat(rp, &ptr, ep);
- } else {
- mon_text_read_statset(rp, &ptr, ep);
+ if (rp->printf_togo == 0) {
+
+ ep = mon_text_read_wait(rp, file);
+ if (IS_ERR(ep)) {
+ mutex_unlock(&rp->printf_lock);
+ return PTR_ERR(ep);
+ }
+ ptr.cnt = 0;
+ ptr.pbuf = rp->printf_buf;
+ ptr.limit = rp->printf_size;
+
+ mon_text_read_head_u(rp, &ptr, ep);
+ if (ep->type == 'E') {
+ mon_text_read_statset(rp, &ptr, ep);
+ } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
+ mon_text_read_isostat(rp, &ptr, ep);
+ mon_text_read_isodesc(rp, &ptr, ep);
+ } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
+ mon_text_read_intstat(rp, &ptr, ep);
+ } else {
+ mon_text_read_statset(rp, &ptr, ep);
+ }
+ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+ " %d", ep->length);
+ mon_text_read_data(rp, &ptr, ep);
+
+ rp->printf_togo = ptr.cnt;
+ rp->printf_offset = 0;
+
+ kmem_cache_free(rp->e_slab, ep);
}
- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
- " %d", ep->length);
- mon_text_read_data(rp, &ptr, ep);
- if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
- ptr.cnt = -EFAULT;
+ ret = mon_text_copy_to_user(rp, buf, nbytes);
mutex_unlock(&rp->printf_lock);
- kmem_cache_free(rp->e_slab, ep);
- return ptr.cnt;
+ return ret;
}
static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
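With the printf_offset/printf_togo bookkeeping above, a formatted record is kept in the buffer between calls, so readers that use small buffers (the "dd bs=10" case the old comment ruled out) now receive the full record across several read() calls. A hedged user-space sketch, assuming the usual debugfs mount point for the usbmon text interface:

/* Hypothetical reader: drains usbmon text records in 16-byte chunks. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/usb/usbmon/0u", O_RDONLY);
	char chunk[16];
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Each read() returns at most sizeof(chunk) bytes of the buffered record */
	while ((n = read(fd, chunk, sizeof(chunk))) > 0)
		fwrite(chunk, 1, n, stdout);
	close(fd);
	return 0;
}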
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 10d30afe4a3c..a0d1417362cd 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -114,15 +114,19 @@ static int service_tx_status_request(
}
is_in = epnum & USB_DIR_IN;
- if (is_in) {
- epnum &= 0x0f;
+ epnum &= 0x0f;
+ if (epnum >= MUSB_C_NUM_EPS) {
+ handled = -EINVAL;
+ break;
+ }
+
+ if (is_in)
ep = &musb->endpoints[epnum].ep_in;
- } else {
+ else
ep = &musb->endpoints[epnum].ep_out;
- }
regs = musb->endpoints[epnum].regs;
- if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+ if (!ep->desc) {
handled = -EINVAL;
break;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 0d843e0f8055..494823f21c28 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1048,7 +1048,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
/* set tx_reinit and schedule the next qh */
ep->tx_reinit = 1;
}
- musb_start_urb(musb, is_in, next_qh);
+
+ if (next_qh)
+ musb_start_urb(musb, is_in, next_qh);
}
}
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d0b6a1cd7f62..c92a295049ad 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -207,9 +207,6 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
- if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
- return false;
-
channel->status = MUSB_DMA_STATUS_BUSY;
channel->actual_len = 0;
ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 732fa214f608..e358fc8086f7 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -148,6 +148,7 @@ config USB_MSM_OTG
tristate "Qualcomm on-chip USB OTG controller support"
depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
depends on RESET_CONTROLLER
+ depends on REGULATOR
depends on EXTCON
select USB_PHY
help
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 2bc3c6fa417a..3ffb20c4a207 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -473,11 +473,11 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
}
if (suspend) {
- if (!phy->cable_connected)
+ if (phy->cable_connected)
+ msm_ssusb_qmp_enable_autonomous(phy, 1);
+ else
writel_relaxed(0x00,
phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
- else
- msm_ssusb_qmp_enable_autonomous(phy, 1);
/* Make sure above write completed with PHY */
wmb();
@@ -540,6 +540,10 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy,
struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
phy);
+ writel_relaxed(0x00,
+ phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+ readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected);
phy->cable_connected = false;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8bb9367ada45..6f37966ea54b 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -999,6 +999,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
goto usbhsf_pio_prepare_pop;
+ /* return at this time if the pipe is running */
+ if (usbhs_pipe_is_running(pipe))
+ return 0;
+
usbhs_pipe_config_change_bfre(pipe, 1);
ret = usbhsf_fifo_select(pipe, fifo, 0);
@@ -1189,6 +1193,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
usbhsf_fifo_clear(pipe, fifo);
pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
+ usbhs_pipe_running(pipe, 0);
usbhsf_dma_stop(pipe, fifo);
usbhsf_dma_unmap(pkt);
usbhsf_fifo_unselect(pipe, pipe->fifo);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 584ae8cbaf1c..77c3ebe860c5 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
- Fundamental Software dongle.
- Google USB serial devices
- HP4x calculators
+ - Libtransistor USB console
- a number of Motorola phones
- Motorola Tetra devices
- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index a4ab4fdf5ba3..32cadca198b2 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -151,6 +151,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */
{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
{ USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
@@ -209,6 +210,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 64fe9dc25ed4..3e5b189a79b4 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -773,6 +773,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
+ { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
@@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -1909,7 +1911,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
return ftdi_jtag_probe(serial);
if (udev->product &&
- (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ (!strcmp(udev->product, "Arrow USB Blaster") ||
+ !strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 543d2801632b..76a10b222ff9 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -922,6 +922,9 @@
/*
* RT Systems programming cables for various ham radios
*/
+/* This device uses the VID of FTDI */
+#define RTSYSTEMS_USB_VX8_PID 0x9e50 /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */
+
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
@@ -1441,6 +1444,12 @@
#define FTDI_CINTERION_MC55I_PID 0xA951
/*
+ * Product: FirmwareHubEmulator
+ * Manufacturer: Harman Becker Automotive Systems
+ */
+#define FTDI_FHE_PID 0xA9A0
+
+/*
* Product: Comet Caller ID decoder
* Manufacturer: Crucible Technologies
*/
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1799aa058a5b..d982c455e18e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M 0x90b2
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -244,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
+#define QUECTEL_PRODUCT_EP06 0x0306
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -550,147 +553,15 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
-struct option_blacklist_info {
- /* bitmask of interface numbers blacklisted for send_setup */
- const unsigned long sendsetup;
- /* bitmask of interface numbers that are reserved */
- const unsigned long reserved;
-};
-
-static const struct option_blacklist_info four_g_w14_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info four_g_w100_blacklist = {
- .sendsetup = BIT(1) | BIT(2),
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info alcatel_x200_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_0037_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info zte_k3765_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info zte_mc2718_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
- .reserved = BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_me3620_xl_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info zte_zm8620_x_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info huawei_cdc12_blacklist = {
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info net_intf0_blacklist = {
- .reserved = BIT(0),
-};
-static const struct option_blacklist_info net_intf1_blacklist = {
- .reserved = BIT(1),
-};
+/* Device flags */
-static const struct option_blacklist_info net_intf2_blacklist = {
- .reserved = BIT(2),
-};
+/* Interface does not support modem-control requests */
+#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
-static const struct option_blacklist_info net_intf3_blacklist = {
- .reserved = BIT(3),
-};
+/* Interface is reserved */
+#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
-static const struct option_blacklist_info net_intf4_blacklist = {
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info net_intf5_blacklist = {
- .reserved = BIT(5),
-};
-
-static const struct option_blacklist_info net_intf6_blacklist = {
- .reserved = BIT(6),
-};
-
-static const struct option_blacklist_info zte_mf626_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_1255_blacklist = {
- .reserved = BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info simcom_sim7100e_blacklist = {
- .reserved = BIT(5) | BIT(6),
-};
-
-static const struct option_blacklist_info telit_me910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info telit_le910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info telit_le920_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(5),
-};
-
-static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
- .sendsetup = BIT(2),
- .reserved = BIT(0) | BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
- .reserved = BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
- .reserved = BIT(1) | BIT(4),
-};
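The NCTRL() and RSVD() macros above fold the old option_blacklist_info structs into the driver_info word itself: the low byte holds the reserved-interface bits and the next byte the no-modem-control bits, one bit per interface number. The consuming side is not part of this hunk; the sketch below shows how such flags would typically be tested at bind time, with the function name and surrounding details assumed for illustration.

/*
 * Sketch only -- the real decoding lives elsewhere in option.c and may
 * differ in detail; id->driver_info carries the RSVD()/NCTRL() bits set
 * in the table that follows.
 */
static int example_check_interface(struct usb_serial *serial,
				   const struct usb_device_id *id)
{
	const struct usb_interface_descriptor *iface_desc =
		&serial->interface->cur_altsetting->desc;
	unsigned long device_flags = id->driver_info;

	/* Reserved interfaces (e.g. network functions) must not be bound */
	if (device_flags & RSVD(iface_desc->bInterfaceNumber))
		return -ENODEV;

	/* Interfaces flagged NCTRL() get no modem-control (send_setup) requests */
	if (device_flags & NCTRL(iface_desc->bInterfaceNumber))
		dev_dbg(&serial->interface->dev,
			"interface %u suppresses modem-control requests\n",
			iface_desc->bInterfaceNumber);

	return 0;
}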
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -724,26 +595,26 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
@@ -1188,65 +1059,70 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
/* Yuga products use Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
- .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
+ .driver_info = RSVD(1) | RSVD(4) },
+ /* u-blox products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+ .driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1254,38 +1130,38 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
- .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
- .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
- .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -1301,58 +1177,58 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_0037_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -1377,26 +1253,26 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
@@ -1412,50 +1288,50 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1572,23 +1448,23 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1603,7 +1479,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1639,17 +1515,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1667,8 +1543,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
@@ -1679,20 +1555,20 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
@@ -1844,19 +1720,19 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
- .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
- .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1876,37 +1752,34 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
- .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ .driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
- .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
- .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
- .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
- },
+ .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+ .driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1932,14 +1805,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
- .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1949,20 +1822,20 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -2039,9 +1912,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -2052,9 +1925,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
@@ -2114,7 +1987,7 @@ static int option_probe(struct usb_serial *serial,
struct usb_interface_descriptor *iface_desc =
&serial->interface->cur_altsetting->desc;
struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
- const struct option_blacklist_info *blacklist;
+ unsigned long device_flags = id->driver_info;
/* Never bind to the CD-Rom emulation interface */
if (iface_desc->bInterfaceClass == 0x08)
@@ -2125,9 +1998,7 @@ static int option_probe(struct usb_serial *serial,
* the same class/subclass/protocol as the serial interfaces. Look at
* the Windows driver .INF files for reserved interface numbers.
*/
- blacklist = (void *)id->driver_info;
- if (blacklist && test_bit(iface_desc->bInterfaceNumber,
- &blacklist->reserved))
+ if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV;
/*
* Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -2138,8 +2009,8 @@ static int option_probe(struct usb_serial *serial,
iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
return -ENODEV;
- /* Store the blacklist info so we can use it during attach. */
- usb_set_serial_data(serial, (void *)blacklist);
+ /* Store the device flags so we can use them during attach. */
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2147,22 +2018,21 @@ static int option_probe(struct usb_serial *serial,
static int option_attach(struct usb_serial *serial)
{
struct usb_interface_descriptor *iface_desc;
- const struct option_blacklist_info *blacklist;
struct usb_wwan_intf_private *data;
+ unsigned long device_flags;
data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
- /* Retrieve blacklist info stored at probe. */
- blacklist = usb_get_serial_data(serial);
+ /* Retrieve device flags stored at probe. */
+ device_flags = (unsigned long)usb_get_serial_data(serial);
iface_desc = &serial->interface->cur_altsetting->desc;
- if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
- &blacklist->sendsetup)) {
+ if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
- }
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
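For context on the flag values used throughout the remapped table above: RSVD() and NCTRL() are bit-mask helpers defined earlier in option.c, outside this excerpt. A minimal sketch of the assumed encoding, with the reserved-interface mask in the low byte and the no-modem-control mask in the next byte:

/* Sketch of the device-flag encoding consumed by option_probe() and
 * option_attach() above; the real definitions live near the top of option.c.
 */
#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)	/* reserved interface, never bind */
#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)	/* no modem-control requests */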
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 6aa7ff2c1cf7..2674da40d9cd 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS() \
+ { USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -113,6 +118,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&funsoft_device,
&flashloader_device,
&google_device,
+ &libtransistor_device,
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
@@ -129,6 +135,7 @@ static const struct usb_device_id id_table[] = {
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
GOOGLE_IDS(),
+ LIBTRANSISTOR_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
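The single LIBTRANSISTOR_IDS() macro is enough here because usb-serial-simple generates both the device table and the usb_serial_driver from its DEVICE() helper, defined near the top of the file and not shown in this hunk. Roughly, and only as an illustration of the pattern:

/* Illustrative expansion of DEVICE(libtransistor, LIBTRANSISTOR_IDS);
 * the actual macro lives at the top of usb-serial-simple.c. */
static const struct usb_device_id libtransistor_id_table[] = {
	LIBTRANSISTOR_IDS(),
	{ },
};

static struct usb_serial_driver libtransistor_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "libtransistor",
	},
	.id_table	= libtransistor_id_table,
	.num_ports	= 1,
};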
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be89fcf..dbc3801b43eb 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -338,47 +338,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
goto exit;
}
- if (retval == sizeof(*connection_info)) {
- connection_info = (struct visor_connection_info *)
- transfer_buffer;
-
- num_ports = le16_to_cpu(connection_info->num_ports);
- for (i = 0; i < num_ports; ++i) {
- switch (
- connection_info->connections[i].port_function_id) {
- case VISOR_FUNCTION_GENERIC:
- string = "Generic";
- break;
- case VISOR_FUNCTION_DEBUGGER:
- string = "Debugger";
- break;
- case VISOR_FUNCTION_HOTSYNC:
- string = "HotSync";
- break;
- case VISOR_FUNCTION_CONSOLE:
- string = "Console";
- break;
- case VISOR_FUNCTION_REMOTE_FILE_SYS:
- string = "Remote File System";
- break;
- default:
- string = "unknown";
- break;
- }
- dev_info(dev, "%s: port %d, is for %s use\n",
- serial->type->description,
- connection_info->connections[i].port, string);
- }
+ if (retval != sizeof(*connection_info)) {
+ dev_err(dev, "Invalid connection information received from device\n");
+ retval = -ENODEV;
+ goto exit;
}
- /*
- * Handle devices that report invalid stuff here.
- */
+
+ connection_info = (struct visor_connection_info *)transfer_buffer;
+
+ num_ports = le16_to_cpu(connection_info->num_ports);
+
+ /* Handle devices that report invalid stuff here. */
if (num_ports == 0 || num_ports > 2) {
dev_warn(dev, "%s: No valid connect info available\n",
serial->type->description);
num_ports = 2;
}
+ for (i = 0; i < num_ports; ++i) {
+ switch (connection_info->connections[i].port_function_id) {
+ case VISOR_FUNCTION_GENERIC:
+ string = "Generic";
+ break;
+ case VISOR_FUNCTION_DEBUGGER:
+ string = "Debugger";
+ break;
+ case VISOR_FUNCTION_HOTSYNC:
+ string = "HotSync";
+ break;
+ case VISOR_FUNCTION_CONSOLE:
+ string = "Console";
+ break;
+ case VISOR_FUNCTION_REMOTE_FILE_SYS:
+ string = "Remote File System";
+ break;
+ default:
+ string = "unknown";
+ break;
+ }
+ dev_info(dev, "%s: port %d, is for %s use\n",
+ serial->type->description,
+ connection_info->connections[i].port, string);
+ }
dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
num_ports);
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 091e8ec7a6c0..962bb6376b0c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1953,6 +1953,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
bcb->CDB[0] = 0xEF;
result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
+ if (us->srb != NULL)
+ scsi_set_resid(us->srb, 0);
info->BIN_FLAG = flag;
kfree(buf);
@@ -2306,21 +2308,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
{
- int result = 0;
+ int result = USB_STOR_XFER_GOOD;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
/*US_DEBUG(usb_stor_show_command(us, srb)); */
scsi_set_resid(srb, 0);
- if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
+ if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
result = ene_init(us);
- } else {
+ if (result == USB_STOR_XFER_GOOD) {
+ result = USB_STOR_TRANSPORT_ERROR;
if (info->SD_Status.Ready)
result = sd_scsi_irp(us, srb);
if (info->MS_Status.Ready)
result = ms_scsi_irp(us, srb);
}
- return 0;
+ return result;
}
static struct scsi_host_template ene_ub6250_host_template;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index de7214ae4fed..6cac8f26b97a 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1052,7 +1052,7 @@ static int uas_post_reset(struct usb_interface *intf)
return 0;
err = uas_configure_endpoints(devinfo);
- if (err && err != ENODEV)
+ if (err && err != -ENODEV)
shost_printk(KERN_ERR, shost,
"%s: alloc streams error %d after reset",
__func__, err);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c10eceb76c39..1a34d2a89de6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2142,6 +2142,13 @@ UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
+/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
+UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Reported-by George Cherian <george.cherian@cavium.com> */
UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
"JMicron",
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 266e2b0ce9a8..47ccd73a74f0 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -88,6 +88,7 @@ struct bus_id_priv {
struct stub_device *sdev;
struct usb_device *udev;
char shutdown_busid;
+ spinlock_t busid_lock;
};
/* stub_priv is allocated from stub_priv_cache */
@@ -98,6 +99,7 @@ extern struct usb_device_driver stub_driver;
/* stub_main.c */
struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
int del_match_busid(char *busid);
void stub_device_cleanup_urbs(struct stub_device *sdev);
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index ec38370ffcab..4aad99a59958 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -87,6 +87,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
goto err;
sdev->ud.tcp_socket = socket;
+ sdev->ud.sockfd = sockfd;
spin_unlock_irq(&sdev->ud.lock);
@@ -186,6 +187,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
+ ud->sockfd = -1;
}
/* 3. free used data */
@@ -280,6 +282,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
sdev->ud.status = SDEV_ST_AVAILABLE;
spin_lock_init(&sdev->ud.lock);
sdev->ud.tcp_socket = NULL;
+ sdev->ud.sockfd = -1;
INIT_LIST_HEAD(&sdev->priv_init);
INIT_LIST_HEAD(&sdev->priv_tx);
@@ -311,9 +314,9 @@ static int stub_probe(struct usb_device *udev)
struct stub_device *sdev = NULL;
const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
- int rc;
+ int rc = 0;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter probe\n");
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
@@ -328,13 +331,15 @@ static int stub_probe(struct usb_device *udev)
* other matched drivers by the driver core.
* See driver_probe_device() in driver/base/dd.c
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -342,13 +347,16 @@ static int stub_probe(struct usb_device *udev)
"%s is attached on vhci_hcd... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
/* ok, this is my device */
sdev = stub_device_alloc(udev);
- if (!sdev)
- return -ENOMEM;
+ if (!sdev) {
+ rc = -ENOMEM;
+ goto call_put_busid_priv;
+ }
dev_info(&udev->dev,
"usbip-host: register new device (bus %u dev %u)\n",
@@ -380,7 +388,9 @@ static int stub_probe(struct usb_device *udev)
}
busid_priv->status = STUB_BUSID_ALLOC;
- return 0;
+ rc = 0;
+ goto call_put_busid_priv;
+
err_files:
usb_hub_release_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
@@ -391,6 +401,9 @@ err_port:
busid_priv->sdev = NULL;
stub_device_free(sdev);
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
return rc;
}
@@ -416,7 +429,7 @@ static void stub_disconnect(struct usb_device *udev)
struct bus_id_priv *busid_priv;
int rc;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter disconnect\n");
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv) {
@@ -429,7 +442,7 @@ static void stub_disconnect(struct usb_device *udev)
/* get stub_device */
if (!sdev) {
dev_err(&udev->dev, "could not get device");
- return;
+ goto call_put_busid_priv;
}
dev_set_drvdata(&udev->dev, NULL);
@@ -444,12 +457,12 @@ static void stub_disconnect(struct usb_device *udev)
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to release port\n");
- return;
+ goto call_put_busid_priv;
}
/* If usb reset is called from event handler */
if (busid_priv->sdev->ud.eh == current)
- return;
+ goto call_put_busid_priv;
/* shutdown the current connection */
shutdown_busid(busid_priv);
@@ -460,12 +473,11 @@ static void stub_disconnect(struct usb_device *udev)
busid_priv->sdev = NULL;
stub_device_free(sdev);
- if (busid_priv->status == STUB_BUSID_ALLOC) {
+ if (busid_priv->status == STUB_BUSID_ALLOC)
busid_priv->status = STUB_BUSID_ADDED;
- } else {
- busid_priv->status = STUB_BUSID_OTHER;
- del_match_busid((char *)udev_busid);
- }
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c05acdd..fa90496ca7a8 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -28,6 +28,7 @@
#define DRIVER_DESC "USB/IP Host Driver"
struct kmem_cache *stub_priv_cache;
+
/*
* busid_tables defines matching busids that usbip can grab. A user can change
* dynamically what device is locally used and what device is exported to a
@@ -39,6 +40,8 @@ static spinlock_t busid_table_lock;
static void init_busid_table(void)
{
+ int i;
+
/*
* This also sets the bus_table[i].status to
* STUB_BUSID_OTHER, which is 0.
@@ -46,6 +49,9 @@ static void init_busid_table(void)
memset(busid_table, 0, sizeof(busid_table));
spin_lock_init(&busid_table_lock);
+
+ for (i = 0; i < MAX_BUSID; i++)
+ spin_lock_init(&busid_table[i].busid_lock);
}
/*
@@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid)
int i;
int idx = -1;
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
idx = i;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+	/* Don't exceed max downstream devices */
return idx;
}
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
struct bus_id_priv *get_busid_priv(const char *busid)
{
int idx;
@@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
spin_lock(&busid_table_lock);
idx = get_busid_idx(busid);
- if (idx >= 0)
+ if (idx >= 0) {
bid = &(busid_table[idx]);
+ /* get busid_lock before returning */
+ spin_lock(&bid->busid_lock);
+ }
spin_unlock(&busid_table_lock);
return bid;
}
+void put_busid_priv(struct bus_id_priv *bid)
+{
+ if (bid)
+ spin_unlock(&bid->busid_lock);
+}
+
static int add_match_busid(char *busid)
{
int i;
@@ -92,15 +112,19 @@ static int add_match_busid(char *busid)
goto out;
}
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (!busid_table[i].name[0]) {
strlcpy(busid_table[i].name, busid, BUSID_SIZE);
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
ret = 0;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+ }
out:
spin_unlock(&busid_table_lock);
@@ -121,6 +145,8 @@ int del_match_busid(char *busid)
/* found */
ret = 0;
+ spin_lock(&busid_table[idx].busid_lock);
+
if (busid_table[idx].status == STUB_BUSID_OTHER)
memset(busid_table[idx].name, 0, BUSID_SIZE);
@@ -128,6 +154,7 @@ int del_match_busid(char *busid)
(busid_table[idx].status != STUB_BUSID_ADDED))
busid_table[idx].status = STUB_BUSID_REMOV;
+ spin_unlock(&busid_table[idx].busid_lock);
out:
spin_unlock(&busid_table_lock);
@@ -140,9 +167,12 @@ static ssize_t show_match_busid(struct device_driver *drv, char *buf)
char *out = buf;
spin_lock(&busid_table_lock);
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
out += sprintf(out, "%s ", busid_table[i].name);
+ spin_unlock(&busid_table[i].busid_lock);
+ }
spin_unlock(&busid_table_lock);
out += sprintf(out, "\n");
@@ -184,6 +214,51 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
store_match_busid);
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+ int ret;
+
+ /* device_attach() callers should hold parent lock for USB */
+ if (busid_priv->udev->dev.parent)
+ device_lock(busid_priv->udev->dev.parent);
+ ret = device_attach(&busid_priv->udev->dev);
+ if (busid_priv->udev->dev.parent)
+ device_unlock(busid_priv->udev->dev.parent);
+ if (ret < 0) {
+ dev_err(&busid_priv->udev->dev, "rebind failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+ struct bus_id_priv *busid_priv;
+ int i;
+
+ /* update status to STUB_BUSID_OTHER so probe ignores the device */
+ spin_lock(&busid_table_lock);
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ busid_priv->status = STUB_BUSID_OTHER;
+ }
+ }
+ spin_unlock(&busid_table_lock);
+
+ /* now run rebind - no need to hold locks. driver files are removed */
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ do_rebind(busid_table[i].name, busid_priv);
+ }
+ }
+#endif
+}
+
static ssize_t rebind_store(struct device_driver *dev, const char *buf,
size_t count)
{
@@ -201,11 +276,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
if (!bid)
return -ENODEV;
- ret = device_attach(&bid->udev->dev);
- if (ret < 0) {
- dev_err(&bid->udev->dev, "rebind failed\n");
+ /* mark the device for deletion so probe ignores it during rescan */
+ bid->status = STUB_BUSID_OTHER;
+ /* release the busid lock */
+ put_busid_priv(bid);
+
+ ret = do_rebind((char *) buf, bid);
+ if (ret < 0)
return ret;
- }
+
+ /* delete device from busid_table */
+ del_match_busid((char *) buf);
return count;
}
@@ -328,6 +409,9 @@ static void __exit usbip_host_exit(void)
*/
usb_deregister_device_driver(&stub_driver);
+ /* initiate scan to attach devices */
+ stub_device_rebind();
+
kmem_cache_destroy(stub_priv_cache);
}
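Because get_busid_priv() now returns with the matched entry's busid_lock held, every caller must release it with put_busid_priv() on all exit paths, as the stub_probe()/stub_disconnect() changes above do. A hypothetical caller, only to illustrate the pairing:

/* Hypothetical caller (not part of the patch) showing the required
 * get_busid_priv()/put_busid_priv() pairing. */
static int example_inspect_busid(const char *busid)
{
	struct bus_id_priv *bid;
	int ret = -ENODEV;

	bid = get_busid_priv(busid);	/* returns with bid->busid_lock held */
	if (!bid)
		return -ENODEV;

	if (bid->status == STUB_BUSID_ADDED)
		ret = 0;		/* fields are stable while locked */

	put_busid_priv(bid);		/* drops bid->busid_lock */
	return ret;
}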
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index f875ccaa55f9..0fc5ace57c0e 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -248,7 +248,7 @@ enum usbip_side {
#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
-#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 00d68945548e..4d68a1e9e878 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -285,7 +285,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" ClearPortFeature: USB_PORT_FEAT_POWER\n");
- dum->port_status[rhport] = 0;
+ dum->port_status[rhport] &= ~USB_PORT_STAT_POWER;
dum->resuming = 0;
break;
case USB_PORT_FEAT_C_RESET:
@@ -797,6 +797,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
if (vdev->ud.tcp_socket) {
sockfd_put(vdev->ud.tcp_socket);
vdev->ud.tcp_socket = NULL;
+ vdev->ud.sockfd = -1;
}
pr_info("release socket\n");
@@ -844,6 +845,7 @@ static void vhci_device_reset(struct usbip_device *ud)
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
+ ud->sockfd = -1;
}
ud->status = VDEV_ST_NULL;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 1c7f41a65565..b9432fdec775 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -53,7 +53,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
* a security hole, the change is made to use sockfd instead.
*/
out += sprintf(out,
- "prt sta spd bus dev sockfd local_busid\n");
+ "prt sta spd dev sockfd local_busid\n");
for (i = 0; i < VHCI_NPORTS; i++) {
struct vhci_device *vdev = port_to_vdev(i);
@@ -64,12 +64,11 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
if (vdev->ud.status == VDEV_ST_USED) {
out += sprintf(out, "%03u %08x ",
vdev->speed, vdev->devid);
- out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
- out += sprintf(out, "%06u", vdev->ud.sockfd);
+ out += sprintf(out, "%06u ", vdev->ud.sockfd);
out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
} else
- out += sprintf(out, "000 000 000 000000 0-0");
+ out += sprintf(out, "000 00000000 000000 0-0");
out += sprintf(out, "\n");
spin_unlock(&vdev->ud.lock);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index fe2b470d7ec6..c55c632a3b24 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -752,6 +752,62 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
return 0;
}
+static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
+ offset + PCI_EXP_DEVCTL);
+ int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized, if set and the device supports PCIe
+ * FLR, issue a reset_function. Regardless, clear the bit, the spec
+ * requires it to be always read as zero. NB, reset_function might
+ * not use a PCIe FLR, we don't have that level of granularity.
+ */
+ if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
+ u32 cap;
+ int ret;
+
+ *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
+
+ ret = pci_user_read_config_dword(vdev->pdev,
+ pos - offset + PCI_EXP_DEVCAP,
+ &cap);
+
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+ pci_try_reset_function(vdev->pdev);
+ }
+
+ /*
+ * MPS is virtualized to the user, writes do not change the physical
+ * register since determining a proper MPS value requires a system wide
+ * device view. The MRRS is largely independent of MPS, but since the
+ * user does not have that system-wide view, they might set a safe, but
+ * inefficiently low value. Here we allow writes through to hardware,
+ * but we set the floor to the physical device MPS setting, so that
+ * we can at least use full TLPs, as defined by the MPS value.
+ *
+ * NB, if any devices actually depend on an artificially low MRRS
+ * setting, this will need to be revisited, perhaps with a quirk
+	 * through pcie_set_readrq().
+ */
+ if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+ readrq = 128 <<
+ ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+ readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+ pcie_set_readrq(vdev->pdev, readrq);
+ }
+
+ return count;
+}
+
/* Permissions for PCI Express capability */
static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
{
@@ -759,26 +815,67 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
return -ENOMEM;
+ perm->writefn = vfio_exp_config_write;
+
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
- * Allow writes to device control fields (includes FLR!)
- * but not to devctl_phantom which could confuse IOMMU
- * or to the ARI bit in devctl2 which is set at probe time
+ * Allow writes to device control fields, except devctl_phantom,
+ * which could confuse IOMMU, MPS, which can break communication
+ * with other physical devices, and the ARI bit in devctl2, which
+ * is set at probe time. FLR and MRRS get virtualized via our
+ * writefn.
*/
- p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
+ p_setw(perm, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
return 0;
}
+static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * The FLR bit is virtualized, if set and the device supports AF
+ * FLR, issue a reset_function. Regardless, clear the bit, the spec
+ * requires it to be always read as zero. NB, reset_function might
+ * not use an AF FLR, we don't have that level of granularity.
+ */
+ if (*ctrl & PCI_AF_CTRL_FLR) {
+ u8 cap;
+ int ret;
+
+ *ctrl &= ~PCI_AF_CTRL_FLR;
+
+ ret = pci_user_read_config_byte(vdev->pdev,
+ pos - offset + PCI_AF_CAP,
+ &cap);
+
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+ pci_try_reset_function(vdev->pdev);
+ }
+
+ return count;
+}
+
/* Permissions for Advanced Function capability */
static int __init init_pci_cap_af_perm(struct perm_bits *perm)
{
if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
return -ENOMEM;
+ perm->writefn = vfio_af_config_write;
+
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
- p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
+ p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
return 0;
}
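To make the MRRS clamp in vfio_exp_config_write() concrete (the numbers below are illustrative, not taken from the patch): the READRQ field sits in bits 14:12 of Device Control and encodes 128 << field bytes, which is then floored to the physical MPS:

/* Illustrative only: the user writes a DevCtl value with READRQ = 1
 * (256 bytes) while the physical device MPS is 512 bytes. */
u16 devctl = 0x1000;
int readrq = 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);	/* 256 */
readrq = max(readrq, 512);	/* pcie_get_mps() == 512 -> floor to 512 */
pcie_set_readrq(pdev, readrq);	/* hardware MRRS ends up at 512 bytes */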
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9eda69e40678..44a5a8777053 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -981,6 +981,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, memory);
vhost_net_vq_reset(n);
done:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..675819a1af37 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -173,8 +173,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
if (mask & POLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
+ vhost_poll_stop(poll);
ret = -EINVAL;
}
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 0efc52f11ad0..b30e7d87804b 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -68,7 +68,6 @@ const struct consw dummy_con = {
.con_switch = DUMMY,
.con_blank = DUMMY,
.con_font_set = DUMMY,
- .con_font_get = DUMMY,
.con_font_default = DUMMY,
.con_font_copy = DUMMY,
.con_set_palette = DUMMY,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 517f565b65d7..598ec7545e84 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -409,7 +409,10 @@ static const char *vgacon_startup(void)
vga_video_port_val = VGA_CRT_DM;
if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) {
static struct resource ega_console_resource =
- { .name = "ega", .start = 0x3B0, .end = 0x3BF };
+ { .name = "ega",
+ .flags = IORESOURCE_IO,
+ .start = 0x3B0,
+ .end = 0x3BF };
vga_video_type = VIDEO_TYPE_EGAM;
vga_vram_size = 0x8000;
display_desc = "EGA+";
@@ -417,9 +420,15 @@ static const char *vgacon_startup(void)
&ega_console_resource);
} else {
static struct resource mda1_console_resource =
- { .name = "mda", .start = 0x3B0, .end = 0x3BB };
+ { .name = "mda",
+ .flags = IORESOURCE_IO,
+ .start = 0x3B0,
+ .end = 0x3BB };
static struct resource mda2_console_resource =
- { .name = "mda", .start = 0x3BF, .end = 0x3BF };
+ { .name = "mda",
+ .flags = IORESOURCE_IO,
+ .start = 0x3BF,
+ .end = 0x3BF };
vga_video_type = VIDEO_TYPE_MDA;
vga_vram_size = 0x2000;
display_desc = "*MDA";
@@ -441,15 +450,21 @@ static const char *vgacon_startup(void)
vga_vram_size = 0x8000;
if (!screen_info.orig_video_isVGA) {
- static struct resource ega_console_resource
- = { .name = "ega", .start = 0x3C0, .end = 0x3DF };
+ static struct resource ega_console_resource =
+ { .name = "ega",
+ .flags = IORESOURCE_IO,
+ .start = 0x3C0,
+ .end = 0x3DF };
vga_video_type = VIDEO_TYPE_EGAC;
display_desc = "EGA";
request_resource(&ioport_resource,
&ega_console_resource);
} else {
- static struct resource vga_console_resource
- = { .name = "vga+", .start = 0x3C0, .end = 0x3DF };
+ static struct resource vga_console_resource =
+ { .name = "vga+",
+ .flags = IORESOURCE_IO,
+ .start = 0x3C0,
+ .end = 0x3DF };
vga_video_type = VIDEO_TYPE_VGAC;
display_desc = "VGA+";
request_resource(&ioport_resource,
@@ -493,7 +508,10 @@ static const char *vgacon_startup(void)
}
} else {
static struct resource cga_console_resource =
- { .name = "cga", .start = 0x3D4, .end = 0x3D5 };
+ { .name = "cga",
+ .flags = IORESOURCE_IO,
+ .start = 0x3D4,
+ .end = 0x3D5 };
vga_video_type = VIDEO_TYPE_CGA;
vga_vram_size = 0x2000;
display_desc = "*CGA";
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 9868d8a5c1ed..3f7f92a90795 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1506,6 +1506,7 @@ config FB_SIS
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_BOOT_VESA_SUPPORT if FB_SIS = y
+ select FB_SIS_300 if !FB_SIS_315
help
This is the frame buffer device driver for the SiS 300, 315, 330
and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets.
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 9362424c2340..924b3d6c3e9b 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -759,8 +759,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
if (err)
return err;
- framesize = fb->panel->mode.xres * fb->panel->mode.yres *
- fb->panel->bpp / 8;
+ framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres *
+ fb->panel->bpp / 8);
fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize,
&dma, GFP_KERNEL);
if (!fb->fb.screen_base)
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 19eb42b57d87..a6da82648c92 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1120,7 +1120,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
goto put_display_node;
}
- timings_np = of_find_node_by_name(display_np, "display-timings");
+ timings_np = of_get_child_by_name(display_np, "display-timings");
if (!timings_np) {
dev_err(dev, "failed to find display-timings node\n");
ret = -ENODEV;
@@ -1141,6 +1141,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
fb_add_videomode(&fb_vm, &info->modelist);
}
+ /*
+ * FIXME: Make sure we are not referencing any fields in display_np
+ * and timings_np and drop our references to them before returning to
+ * avoid leaking the nodes on probe deferral and driver unbind.
+ */
+
return 0;
put_timings_node:
diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c
index 8d2499d1cafb..9580374667ba 100644
--- a/drivers/video/fbdev/auo_k190x.c
+++ b/drivers/video/fbdev/auo_k190x.c
@@ -773,9 +773,7 @@ static void auok190x_recover(struct auok190xfb_par *par)
/*
* Power-management
*/
-
-#ifdef CONFIG_PM
-static int auok190x_runtime_suspend(struct device *dev)
+static int __maybe_unused auok190x_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct fb_info *info = platform_get_drvdata(pdev);
@@ -822,7 +820,7 @@ finish:
return 0;
}
-static int auok190x_runtime_resume(struct device *dev)
+static int __maybe_unused auok190x_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct fb_info *info = platform_get_drvdata(pdev);
@@ -856,7 +854,7 @@ static int auok190x_runtime_resume(struct device *dev)
return 0;
}
-static int auok190x_suspend(struct device *dev)
+static int __maybe_unused auok190x_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct fb_info *info = platform_get_drvdata(pdev);
@@ -896,7 +894,7 @@ static int auok190x_suspend(struct device *dev)
return 0;
}
-static int auok190x_resume(struct device *dev)
+static int __maybe_unused auok190x_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct fb_info *info = platform_get_drvdata(pdev);
@@ -933,7 +931,6 @@ static int auok190x_resume(struct device *dev)
return 0;
}
-#endif
const struct dev_pm_ops auok190x_pm = {
SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
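The __maybe_unused conversion works because SET_RUNTIME_PM_OPS() and SET_SYSTEM_SLEEP_PM_OPS() only reference their callback arguments when CONFIG_PM (respectively CONFIG_PM_SLEEP) is enabled; without the attribute, the now unconditionally compiled callbacks would trip -Wunused-function warnings on !CONFIG_PM builds. A sketch of the resulting ops table, assuming the existing auok190x_pm definition continues as before:

/* Sketch only - the real auok190x_pm continues below the hunk shown above. */
const struct dev_pm_ops auok190x_pm = {
	SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
};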
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 95873f26e39c..de2f3e793786 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
return 0;
}
-#ifdef CONFIG_PM
-static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
{
struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
@@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
return 0;
}
-static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
{
struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
@@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
return 0;
}
-#else
-#define s6e8ax0_suspend NULL
-#define s6e8ax0_resume NULL
-#endif
static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
.name = "s6e8ax0",
@@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
.power_on = s6e8ax0_power_on,
.set_sequence = s6e8ax0_set_sequence,
.probe = s6e8ax0_probe,
- .suspend = s6e8ax0_suspend,
- .resume = s6e8ax0_resume,
+ .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
+ .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
};
static int s6e8ax0_init(void)
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 1e56b50e4082..88adb2970b44 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -38,11 +38,58 @@ enum {
FB_SET_BLANK = 0x18,
FB_GET_PHYS_WIDTH = 0x1c,
FB_GET_PHYS_HEIGHT = 0x20,
+ FB_GET_FORMAT = 0x24,
FB_INT_VSYNC = 1U << 0,
FB_INT_BASE_UPDATE_DONE = 1U << 1
};
+/* These values *must* match the platform definitions found under
+ * <system/graphics.h>
+ */
+enum {
+ HAL_PIXEL_FORMAT_RGBA_8888 = 1,
+ HAL_PIXEL_FORMAT_RGBX_8888 = 2,
+ HAL_PIXEL_FORMAT_RGB_888 = 3,
+ HAL_PIXEL_FORMAT_RGB_565 = 4,
+ HAL_PIXEL_FORMAT_BGRA_8888 = 5,
+};
+
+struct framebuffer_config {
+ u8 bytes_per_pixel;
+ u8 red_offset;
+ u8 red_length;
+ u8 green_offset;
+ u8 green_length;
+ u8 blue_offset;
+ u8 blue_length;
+ u8 transp_offset;
+ u8 transp_length;
+};
+
+enum {
+ CHAR_BIT = 8
+};
+
+static const struct framebuffer_config *get_fb_config_from_format(int format)
+{
+ static const struct framebuffer_config fb_configs[] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Invalid, assume RGB_565 */
+ { 4, 0, 8, 8, 8, 16, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_RGBA_8888 */
+ { 4, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGBX_8888 */
+ { 3, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGB_888 */
+ { 2, 11, 5, 5, 6, 0, 5, 0, 0 }, /* HAL_PIXEL_FORMAT_RGB_565 */
+ { 4, 16, 8, 8, 8, 0, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_BGRA_8888 */
+ };
+
+ if (format > 0 &&
+ format < sizeof(fb_configs) / sizeof(struct framebuffer_config)) {
+ return &fb_configs[format];
+ }
+
+ return &fb_configs[HAL_PIXEL_FORMAT_RGB_565]; /* legacy default */
+}
+
struct goldfish_fb {
void __iomem *reg_base;
int irq;
@@ -125,8 +172,10 @@ static int goldfish_fb_check_var(struct fb_var_screeninfo *var,
static int goldfish_fb_set_par(struct fb_info *info)
{
struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
if (fb->rotation != fb->fb.var.rotate) {
- info->fix.line_length = info->var.xres * 2;
+ info->fix.line_length = info->var.xres *
+ (fb->fb.var.bits_per_pixel / CHAR_BIT);
fb->rotation = fb->fb.var.rotate;
writel(fb->rotation, fb->reg_base + FB_SET_ROTATION);
}
@@ -143,19 +192,24 @@ static int goldfish_fb_pan_display(struct fb_var_screeninfo *var,
spin_lock_irqsave(&fb->lock, irq_flags);
base_update_count = fb->base_update_count;
- writel(fb->fb.fix.smem_start + fb->fb.var.xres * 2 * var->yoffset,
- fb->reg_base + FB_SET_BASE);
+ writel(fb->fb.fix.smem_start +
+ fb->fb.var.xres *
+ (fb->fb.var.bits_per_pixel / CHAR_BIT) *
+ var->yoffset,
+ fb->reg_base + FB_SET_BASE);
spin_unlock_irqrestore(&fb->lock, irq_flags);
wait_event_timeout(fb->wait,
fb->base_update_count != base_update_count, HZ / 15);
if (fb->base_update_count == base_update_count)
- pr_err("goldfish_fb_pan_display: timeout waiting for base update\n");
+ pr_err("goldfish_fb_pan_display: timeout waiting for "
+ "base update\n");
return 0;
}
static int goldfish_fb_blank(int blank, struct fb_info *info)
{
struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
+
switch (blank) {
case FB_BLANK_NORMAL:
writel(1, fb->reg_base + FB_SET_BLANK);
@@ -186,8 +240,10 @@ static int goldfish_fb_probe(struct platform_device *pdev)
struct resource *r;
struct goldfish_fb *fb;
size_t framesize;
- u32 width, height;
+ u32 width, height, format;
+ int bytes_per_pixel;
dma_addr_t fbpaddr;
+ const struct framebuffer_config *fb_config;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (fb == NULL) {
@@ -217,13 +273,20 @@ static int goldfish_fb_probe(struct platform_device *pdev)
width = readl(fb->reg_base + FB_GET_WIDTH);
height = readl(fb->reg_base + FB_GET_HEIGHT);
+ format = readl(fb->reg_base + FB_GET_FORMAT);
+ fb_config = get_fb_config_from_format(format);
+ if (!fb_config) {
+ ret = -EINVAL;
+ goto err_no_irq;
+ }
+ bytes_per_pixel = fb_config->bytes_per_pixel;
fb->fb.fbops = &goldfish_fb_ops;
fb->fb.flags = FBINFO_FLAG_DEFAULT;
fb->fb.pseudo_palette = fb->cmap;
fb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
fb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- fb->fb.fix.line_length = width * 2;
+ fb->fb.fix.line_length = width * bytes_per_pixel;
fb->fb.fix.accel = FB_ACCEL_NONE;
fb->fb.fix.ypanstep = 1;
@@ -231,20 +294,22 @@ static int goldfish_fb_probe(struct platform_device *pdev)
fb->fb.var.yres = height;
fb->fb.var.xres_virtual = width;
fb->fb.var.yres_virtual = height * 2;
- fb->fb.var.bits_per_pixel = 16;
+ fb->fb.var.bits_per_pixel = bytes_per_pixel * CHAR_BIT;
fb->fb.var.activate = FB_ACTIVATE_NOW;
fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
fb->fb.var.pixclock = 0;
- fb->fb.var.red.offset = 11;
- fb->fb.var.red.length = 5;
- fb->fb.var.green.offset = 5;
- fb->fb.var.green.length = 6;
- fb->fb.var.blue.offset = 0;
- fb->fb.var.blue.length = 5;
+ fb->fb.var.red.offset = fb_config->red_offset;
+ fb->fb.var.red.length = fb_config->red_length;
+ fb->fb.var.green.offset = fb_config->green_offset;
+ fb->fb.var.green.length = fb_config->green_length;
+ fb->fb.var.blue.offset = fb_config->blue_offset;
+ fb->fb.var.blue.length = fb_config->blue_length;
+ fb->fb.var.transp.offset = fb_config->transp_offset;
+ fb->fb.var.transp.length = fb_config->transp_length;
- framesize = width * height * 2 * 2;
+ framesize = width * height * 2 * bytes_per_pixel;
fb->fb.screen_base = (char __force __iomem *)dma_alloc_coherent(
&pdev->dev, framesize,
&fbpaddr, GFP_KERNEL);
@@ -295,7 +360,8 @@ static int goldfish_fb_remove(struct platform_device *pdev)
size_t framesize;
struct goldfish_fb *fb = platform_get_drvdata(pdev);
- framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * 2;
+ framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual *
+ (fb->fb.var.bits_per_pixel / CHAR_BIT);
unregister_framebuffer(&fb->fb);
free_irq(fb->irq, fb);
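To make the format-driven sizing above concrete (the panel numbers are illustrative): for a 1280x720 panel reporting HAL_PIXEL_FORMAT_RGBA_8888, bytes_per_pixel is 4 and the driver allocates twice the visible frame for panning:

/* Illustrative arithmetic for a 1280x720 RGBA_8888 panel:
 *   line_length = 1280 * 4           =    5120 bytes
 *   framesize   = 1280 * 720 * 2 * 4 = 7372800 bytes (x2 for double buffering)
 * versus 2560 and 3686400 bytes with the previous hard-coded 16bpp RGB_565 path.
 */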
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index bbec737eef30..bf207444ba0c 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -302,7 +302,7 @@ static __inline__ int get_opt_int(const char *this_opt, const char *name,
}
static __inline__ int get_opt_bool(const char *this_opt, const char *name,
- int *ret)
+ bool *ret)
{
if (!ret)
return 0;
diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index a0f496049db7..3a6bb6561ba0 100644
--- a/drivers/video/fbdev/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <video/mmp_disp.h>
static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
@@ -249,3 +250,7 @@ void mmp_unregister_path(struct mmp_path *path)
mutex_unlock(&disp_lock);
}
EXPORT_SYMBOL_GPL(mmp_unregister_path);
+
+MODULE_AUTHOR("Zhou Zhu <zzhu3@marvell.com>");
+MODULE_DESCRIPTION("Marvell MMP display framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index fc89a2ea772e..8a9e8acf6c0e 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1563,12 +1563,16 @@ static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
}
break;
case metadata_op_get_ion_fd:
- if (mfd->fb_ion_handle) {
+ if (mfd->fb_ion_handle && mfd->fb_ion_client) {
+ get_dma_buf(mfd->fbmem_buf);
metadata->data.fbmem_ionfd =
- dma_buf_fd(mfd->fbmem_buf, 0);
- if (metadata->data.fbmem_ionfd < 0)
+ ion_share_dma_buf_fd(mfd->fb_ion_client,
+ mfd->fb_ion_handle);
+ if (metadata->data.fbmem_ionfd < 0) {
+ dma_buf_put(mfd->fbmem_buf);
pr_err("fd allocation failed. fd = %d\n",
- metadata->data.fbmem_ionfd);
+ metadata->data.fbmem_ionfd);
+ }
}
break;
default:
@@ -2643,6 +2647,7 @@ int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff;
mdp3_interface->lut_update = NULL;
mdp3_interface->configure_panel = mdp3_update_panel_info;
+ mdp3_interface->signal_retire_fence = NULL;
mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
if (!mdp3_session) {
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 5548f0f09f8a..1ccb27113c11 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -255,6 +255,13 @@ struct mdss_scaler_block {
u32 *dest_scaler_off;
u32 *dest_scaler_lut_off;
struct mdss_mdp_qseed3_lut_tbl lut_tbl;
+
+ /*
+ * Lock is mainly to serialize access to LUT.
+ * LUT values come asynchronously from userspace
+ * via ioctl.
+ */
+ struct mutex scaler_lock;
};
struct mdss_data_type;
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
index aeefc81657b0..10d747962a91 100644
--- a/drivers/video/fbdev/msm/mdss_debug_xlog.c
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -755,6 +755,11 @@ static ssize_t mdss_xlog_dump_read(struct file *file, char __user *buff,
if (__mdss_xlog_dump_calc_range()) {
len = mdss_xlog_dump_entry(xlog_buf, MDSS_XLOG_BUF_MAX);
+ if (len < 0 || len > count) {
+ pr_err("len is more than the size of user buffer\n");
+ return 0;
+ }
+
if (copy_to_user(buff, xlog_buf, len))
return -EFAULT;
*ppos += len;
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 71584dff75cb..3240c0e71fb6 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -1967,6 +1967,7 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
edid_init_data.kobj = dp_drv->kobj;
edid_init_data.max_pclk_khz = dp_drv->max_pclk_khz;
+ edid_init_data.yc420_support = false;
edid_data = hdmi_edid_init(&edid_init_data);
if (!edid_data) {
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 407f230ca71e..662e55be8b94 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -1558,7 +1558,7 @@ static void dp_sink_parse_sink_count(struct mdss_dp_drv_pdata *ep)
data = *bp++;
/* BIT 7, BIT 5:0 */
- ep->sink_count.count = (data & BIT(7)) << 6 | (data & 0x63);
+ ep->sink_count.count = (data & BIT(7)) >> 1 | (data & 0x3F);
/* BIT 6*/
ep->sink_count.cp_ready = data & BIT(6);
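The corrected decode matches the DPCD SINK_COUNT layout (register 0x200), where bits 5:0 hold SINK_COUNT[5:0], bit 7 holds SINK_COUNT[6], and bit 6 is CP_READY. A worked example with an illustrative value:

/* data = 0x81: one sink in the low bits plus the MSB set.
 *   new: ((0x81 & BIT(7)) >> 1) | (0x81 & 0x3F) = 0x40 | 0x01 = 65
 *   old: ((0x81 & BIT(7)) << 6) | (0x81 & 0x63) = 0x2000 | 0x01  (bogus, and
 *        the 0x63 mask also pulled in the CP_READY bit)
 */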
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 7b6153503af5..0d41f41371dd 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -760,7 +760,7 @@ static ssize_t mdss_dsi_cmd_state_read(struct file *file, char __user *buf,
if (blen < 0)
return 0;
- if (copy_to_user(buf, buffer, blen))
+ if (copy_to_user(buf, buffer, min(count, (size_t)blen+1)))
return -EFAULT;
*ppos += blen;
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index d35858137191..6c4db0f1f5bd 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -3743,8 +3743,11 @@ skip_commit:
if (IS_ERR_VALUE(ret) || !sync_pt_data->flushed) {
mdss_fb_release_kickoff(mfd);
mdss_fb_signal_timeline(sync_pt_data);
- }
+ if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+ (mfd->mdp.signal_retire_fence))
+ mfd->mdp.signal_retire_fence(mfd, 1);
+ }
if (dynamic_dsi_switch) {
MDSS_XLOG(mfd->index, mfd->split_mode, new_dsi_mode,
XLOG_FUNC_EXIT);
@@ -4658,6 +4661,7 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
struct mdp_destination_scaler_data *ds_data = NULL;
struct mdp_destination_scaler_data __user *ds_data_user;
struct msm_fb_data_type *mfd;
+ struct mdss_overlay_private *mdp5_data = NULL;
ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
if (ret) {
@@ -4669,9 +4673,20 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
if (!mfd)
return -EINVAL;
+ mdp5_data = mfd_to_mdp5_data(mfd);
+
if (mfd->panel_info->panel_dead) {
pr_debug("early commit return\n");
MDSS_XLOG(mfd->panel_info->panel_dead);
+		/*
+		 * In case of an ESD attack, since we return early from the
+		 * commits, we need to signal the outstanding fences.
+		 */
+ mdss_fb_release_fences(mfd);
+ if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+ mfd->mdp.signal_retire_fence && mdp5_data)
+ mfd->mdp.signal_retire_fence(mfd,
+ mdp5_data->retire_cnt);
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
index 6e52390c2886..301c1386a639 100644
--- a/drivers/video/fbdev/msm/mdss_fb.h
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -234,6 +234,8 @@ struct msm_mdp_interface {
int (*input_event_handler)(struct msm_fb_data_type *mfd);
void (*footswitch_ctrl)(bool on);
int (*pp_release_fnc)(struct msm_fb_data_type *mfd);
+ void (*signal_retire_fence)(struct msm_fb_data_type *mfd,
+ int retire_cnt);
void *private1;
};
diff --git a/drivers/video/fbdev/msm/mdss_hdcp.h b/drivers/video/fbdev/msm/mdss_hdcp.h
index 40089d96ea78..8570abf2d0c0 100644
--- a/drivers/video/fbdev/msm/mdss_hdcp.h
+++ b/drivers/video/fbdev/msm/mdss_hdcp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,8 @@
#include <video/msm_hdmi_modes.h>
#include <soc/qcom/scm.h>
+#define HDCP_SRM_CHECK_FAIL 29
+
enum hdcp_client_id {
HDCP_CLIENT_HDMI,
HDCP_CLIENT_DP,
@@ -28,6 +30,7 @@ enum hdcp_states {
HDCP_STATE_AUTHENTICATING,
HDCP_STATE_AUTHENTICATED,
HDCP_STATE_AUTH_FAIL,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH,
HDCP_STATE_AUTH_ENC_NONE,
HDCP_STATE_AUTH_ENC_1X,
HDCP_STATE_AUTH_ENC_2P2
diff --git a/drivers/video/fbdev/msm/mdss_hdcp_1x.c b/drivers/video/fbdev/msm/mdss_hdcp_1x.c
index 2dc9c8f96c5b..8df9080129b9 100644
--- a/drivers/video/fbdev/msm/mdss_hdcp_1x.c
+++ b/drivers/video/fbdev/msm/mdss_hdcp_1x.c
@@ -789,13 +789,118 @@ error:
return rc;
}
+static u8 *hdcp_1x_swap_byte_order(u8 *bksv_in, int num_dev)
+{
+ u8 *bksv_out;
+ u8 *tmp_out;
+ u8 *tmp_in;
+ int i, j;
+
+ /* Dont exceed max downstream devices */
+ if (num_dev > MAX_DEVICES_SUPPORTED) {
+ pr_err("invalid params\n");
+ return NULL;
+ }
+
+ bksv_out = kzalloc(RECV_ID_SIZE * num_dev, GFP_KERNEL);
+
+ if (!bksv_out)
+ return NULL;
+
+ pr_debug("num_dev = %d\n", num_dev);
+
+ /* Store temporarily for return */
+ tmp_out = bksv_out;
+ tmp_in = bksv_in;
+
+ for (i = 0; i < num_dev; i++) {
+ for (j = 0; j < RECV_ID_SIZE; j++)
+ bksv_out[j] = tmp_in[RECV_ID_SIZE - j - 1];
+
+ /* Each KSV is 5 bytes long */
+ bksv_out += RECV_ID_SIZE;
+ tmp_in += RECV_ID_SIZE;
+ }
+
+ return tmp_out;
+}
+
+static int hdcp_1x_revoked_rcv_chk(struct hdcp_1x *hdcp)
+{
+ int rc = 0;
+ u8 *bksv = hdcp->current_tp.bksv;
+ u8 *bksv_out;
+ struct hdcp_srm_device_id_t *bksv_srm;
+
+ bksv_out = hdcp_1x_swap_byte_order(bksv, 1);
+
+ if (!bksv_out) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ pr_debug("bksv_out : 0x%2x%2x%2x%2x%2x\n",
+ bksv_out[4], bksv_out[3], bksv_out[2],
+ bksv_out[1], bksv_out[0]);
+
+ bksv_srm = (struct hdcp_srm_device_id_t *)bksv_out;
+	/* Here we are checking only the receiver ID,
+	 * hence the device count is one.
+	 */
+ rc = hdcp1_validate_receiver_ids(bksv_srm, 1);
+
+ kfree(bksv_out);
+
+exit:
+ return rc;
+}
+
+static int hdcp_1x_revoked_rpt_chk(struct hdcp_1x *hdcp)
+{
+ int rc = 0;
+ int i;
+ u8 *bksv = hdcp->current_tp.ksv_list;
+ u8 *bksv_out;
+ struct hdcp_srm_device_id_t *bksv_srm;
+
+ for (i = 0; i < hdcp->sink_addr.ksv_fifo.len;
+ i += RECV_ID_SIZE) {
+ pr_debug("bksv : 0x%2x%2x%2x%2x%2x\n",
+ bksv[i + 4],
+ bksv[i + 3], bksv[i + 2],
+ bksv[i + 1], bksv[i]);
+ }
+
+ bksv_out = hdcp_1x_swap_byte_order(bksv,
+ hdcp->current_tp.dev_count);
+
+ if (!bksv_out) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ bksv_srm = (struct hdcp_srm_device_id_t *)bksv_out;
+ /* Here we are checking repeater ksv list */
+ rc = hdcp1_validate_receiver_ids(bksv_srm,
+ hdcp->current_tp.dev_count);
+
+ kfree(bksv_out);
+
+exit:
+ return rc;
+}
+
static void hdcp_1x_enable_sink_irq_hpd(struct hdcp_1x *hdcp)
{
int rc;
u8 enable_hpd_irq = 0x1;
- u16 version = *hdcp->init_data.version;
+ u16 version;
const int major = 1, minor = 2;
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+ return;
+
+ version = *hdcp->init_data.version;
pr_debug("version 0x%x\n", version);
if (((version & 0xFF) < minor) ||
@@ -906,6 +1011,12 @@ static int hdcp_1x_authentication_part1(struct hdcp_1x *hdcp)
if (rc)
goto error;
+ rc = hdcp_1x_revoked_rcv_chk(hdcp);
+ if (rc) {
+ rc = -HDCP_SRM_CHECK_FAIL;
+ goto error;
+ }
+
rc = hdcp_1x_send_an_aksv_to_sink(hdcp);
if (rc)
goto error;
@@ -1230,14 +1341,22 @@ static int hdcp_1x_authentication_part2(struct hdcp_1x *hdcp)
if (rc)
goto error;
+ rc = hdcp_1x_revoked_rpt_chk(hdcp);
+ if (rc) {
+ rc = -HDCP_SRM_CHECK_FAIL;
+ goto error;
+ }
+
do {
rc = hdcp_1x_transfer_v_h(hdcp);
if (rc)
goto error;
/* do not proceed further if no device connected */
- if (!hdcp->current_tp.dev_count)
+ if (!hdcp->current_tp.dev_count) {
+ rc = -EINVAL;
goto error;
+ }
rc = hdcp_1x_write_ksv_fifo(hdcp);
} while (--v_retry && rc);
@@ -1344,7 +1463,7 @@ static void hdcp_1x_auth_work(struct work_struct *work)
goto end;
} else {
hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
- goto end;
+ goto disable_sw_ddc;
}
hdcp->ksv_ready = false;
@@ -1353,6 +1472,7 @@ static void hdcp_1x_auth_work(struct work_struct *work)
if (rc)
goto end;
+disable_sw_ddc:
/*
* Disabling software DDC before going into part3 to make sure
* there is no Arbitration between software and hardware for DDC
@@ -1361,9 +1481,11 @@ static void hdcp_1x_auth_work(struct work_struct *work)
DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
HDMI_DDC_ARBITRATION) | (BIT(4)));
end:
- if (rc && !hdcp_1x_state(HDCP_STATE_INACTIVE))
+ if (rc && !hdcp_1x_state(HDCP_STATE_INACTIVE)) {
hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
-
+ if (rc == -HDCP_SRM_CHECK_FAIL)
+ hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL_NOREAUTH;
+ }
hdcp_1x_update_auth_status(hdcp);
@@ -1751,6 +1873,7 @@ void hdcp_1x_deinit(void *input)
sysfs_remove_group(hdcp->init_data.sysfs_kobj,
&hdcp_1x_fs_attr_group);
+ hdcp1_client_unregister();
kfree(hdcp);
} /* hdcp_1x_deinit */
@@ -1853,6 +1976,44 @@ irq_not_handled:
return -EINVAL;
}
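+/*
+ * SRM callback registered with the HDCP 1.x library: re-validate the
+ * receiver BKSV (and, for repeaters, the downstream KSV list) and fail
+ * authentication without re-authentication if a revoked ID is found.
+ */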
+static void hdcp_1x_srm_cb(void *input)
+{
+ struct hdcp_1x *hdcp = (struct hdcp_1x *)input;
+ int rc = 0;
+
+ if (!hdcp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ rc = hdcp_1x_revoked_rcv_chk(hdcp);
+
+ if (rc) {
+ pr_err("receiver failed SRM check\n");
+ goto fail_noreauth;
+ }
+
+ /* If it's not a repeater, we are done */
+ if (hdcp->current_tp.ds_type != DS_REPEATER)
+ return;
+
+ /* Check the repeater KSV against SRM */
+ rc = hdcp_1x_revoked_rpt_chk(hdcp);
+ if (rc) {
+ pr_err("repeater failed SRM check\n");
+ goto fail_noreauth;
+ }
+
+ return;
+
+ fail_noreauth:
+ /* No reauth in case of SRM failure */
+ hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL_NOREAUTH;
+ hdcp_1x_update_auth_status(hdcp);
+}
+
void *hdcp_1x_init(struct hdcp_init_data *init_data)
{
struct hdcp_1x *hdcp = NULL;
@@ -1865,6 +2026,10 @@ void *hdcp_1x_init(struct hdcp_init_data *init_data)
.off = hdcp_1x_off
};
+ static struct hdcp_client_ops client_ops = {
+ .srm_cb = hdcp_1x_srm_cb,
+ };
+
if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
!init_data->mutex || !init_data->notify_status ||
!init_data->workq || !init_data->cb_data) {
@@ -1907,6 +2072,9 @@ void *hdcp_1x_init(struct hdcp_init_data *init_data)
init_completion(&hdcp->r0_checked);
init_completion(&hdcp->sink_r0_available);
+ /* Register client ctx and the srm_cb with hdcp lib */
+ hdcp1_client_register((void *)hdcp, &client_ops);
+
pr_debug("HDCP module initialized. HDCP_STATE=%s\n",
HDCP_STATE_NAME);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index a49c5290753c..0c04fd35c0d5 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -165,6 +165,10 @@ static bool hdmi_edid_is_mode_supported(struct hdmi_edid_ctrl *edid_ctrl,
pclk > edid_ctrl->init_data.max_pclk_khz)
return false;
+ if ((out_format == MDP_Y_CBCR_H2V2) &&
+ !edid_ctrl->init_data.yc420_support)
+ return false;
+
return true;
}
@@ -204,6 +208,7 @@ static int hdmi_edid_reset_parser(struct hdmi_edid_ctrl *edid_ctrl)
sizeof(edid_ctrl->spkr_alloc_data_block));
edid_ctrl->adb_size = 0;
edid_ctrl->sadb_size = 0;
+ edid_ctrl->basic_audio_supp = false;
hdmi_edid_set_video_resolution(edid_ctrl, edid_ctrl->default_vic, true);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.h b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
index af802bb45f89..63785e95bd59 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
@@ -24,6 +24,7 @@ struct hdmi_edid_init_data {
struct kobject *kobj;
struct hdmi_util_ds_data ds_data;
u32 max_pclk_khz;
+ bool yc420_support;
u8 *buf;
u32 buf_size;
};
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index eadb90f3ce88..46e289b6dbd3 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -463,6 +463,41 @@ static void hdmi_hdcp2p2_auth_failed(struct hdmi_hdcp2p2_ctrl *ctrl)
HDCP_STATE_AUTH_FAIL);
}
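+/*
+ * Move the state machine to AUTH_FAIL and notify the HDMI tx driver
+ * with HDCP_STATE_AUTH_FAIL_NOREAUTH so no re-authentication is tried.
+ */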
+static void hdmi_hdcp2p2_fail_noreauth(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+ hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+ /* notify hdmi tx about HDCP failure */
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH);
+}
+
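+/* SRM callback: stop the HDCP library and fail without re-authentication */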
+static void hdmi_hdcp2p2_srm_cb(void *client_ctx)
+{
+ struct hdmi_hdcp2p2_ctrl *ctrl =
+ (struct hdmi_hdcp2p2_ctrl *)client_ctx;
+ struct hdcp_lib_wakeup_data cdata = {
+ HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ hdmi_hdcp2p2_fail_noreauth(ctrl);
+}
+
static int hdmi_hdcp2p2_ddc_read_message(struct hdmi_hdcp2p2_ctrl *ctrl,
u8 *buf, int size, u32 timeout)
{
@@ -995,6 +1030,7 @@ void *hdmi_hdcp2p2_init(struct hdcp_init_data *init_data)
static struct hdcp_client_ops client_ops = {
.wakeup = hdmi_hdcp2p2_wakeup,
+ .srm_cb = hdmi_hdcp2p2_srm_cb,
};
static struct hdcp_txmtr_ops txmtr_ops;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 5cb436261115..4f30f7864bb0 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -120,6 +120,8 @@ static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
struct msm_ext_disp_audio_edid_blk *blk);
static int hdmi_tx_get_cable_status(struct platform_device *pdev, u32 vote);
static int hdmi_tx_update_ppm(struct hdmi_tx_ctrl *hdmi_ctrl, s32 ppm);
+static int hdmi_tx_enable_pll_update(struct hdmi_tx_ctrl *hdmi_ctrl,
+ int enable);
static struct mdss_hw hdmi_tx_hw = {
.hw_ndx = MDSS_HW_HDMI,
@@ -415,13 +417,13 @@ static inline void hdmi_tx_send_audio_notification(
struct hdmi_tx_ctrl *hdmi_ctrl, int val)
{
if (hdmi_ctrl && hdmi_ctrl->ext_audio_data.intf_ops.hpd) {
- u32 flags = 0;
+ u32 flags = 0;
- if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
- flags |= MSM_EXT_DISP_HPD_AUDIO;
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
+ flags |= MSM_EXT_DISP_HPD_AUDIO;
- if (flags)
- hdmi_ctrl->ext_audio_data.intf_ops.hpd(
+ if (flags)
+ hdmi_ctrl->ext_audio_data.intf_ops.hpd(
hdmi_ctrl->ext_pdev,
hdmi_ctrl->ext_audio_data.type, val, flags);
}
@@ -450,8 +452,6 @@ static inline void hdmi_tx_ack_state(
!hdmi_tx_is_dvi_mode(hdmi_ctrl))
hdmi_ctrl->ext_audio_data.intf_ops.notify(hdmi_ctrl->ext_pdev,
val);
-
- hdmi_tx_send_audio_notification(hdmi_ctrl, val);
}
static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_panel_data(
@@ -1370,6 +1370,59 @@ end:
return ret;
}
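+/* sysfs read handler: report whether HDMI PLL updates are enabled */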
+static ssize_t hdmi_tx_sysfs_rda_pll_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ hdmi_ctrl->pll_update_enable);
+ pr_debug("HDMI PLL update: %s\n",
+ hdmi_ctrl->pll_update_enable ? "enable" : "disable");
+
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+} /* hdmi_tx_sysfs_rda_pll_enable */
+
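+/* sysfs write handler: enable or disable HDMI PLL updates */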
+static ssize_t hdmi_tx_sysfs_wta_pll_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int enable, rc;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+
+ rc = kstrtoint(buf, 10, &enable);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = hdmi_tx_enable_pll_update(hdmi_ctrl, enable);
+ if (rc)
+ goto end;
+
+ rc = strnlen(buf, PAGE_SIZE);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return rc;
+} /* hdmi_tx_sysfs_wta_pll_enable */
+
static DEVICE_ATTR(connected, S_IRUGO, hdmi_tx_sysfs_rda_connected, NULL);
static DEVICE_ATTR(hot_plug, S_IWUSR, NULL, hdmi_tx_sysfs_wta_hot_plug);
static DEVICE_ATTR(sim_mode, S_IRUGO | S_IWUSR, hdmi_tx_sysfs_rda_sim_mode,
@@ -1392,6 +1445,9 @@ static DEVICE_ATTR(5v, S_IWUSR, NULL, hdmi_tx_sysfs_wta_5v);
static DEVICE_ATTR(hdr_stream, S_IWUSR, NULL, hdmi_tx_sysfs_wta_hdr_stream);
static DEVICE_ATTR(hdmi_ppm, S_IRUGO | S_IWUSR, NULL,
hdmi_tx_sysfs_wta_hdmi_ppm);
+static DEVICE_ATTR(pll_enable, S_IRUGO | S_IWUSR, hdmi_tx_sysfs_rda_pll_enable,
+ hdmi_tx_sysfs_wta_pll_enable);
+
static struct attribute *hdmi_tx_fs_attrs[] = {
&dev_attr_connected.attr,
@@ -1408,6 +1464,7 @@ static struct attribute *hdmi_tx_fs_attrs[] = {
&dev_attr_5v.attr,
&dev_attr_hdr_stream.attr,
&dev_attr_hdmi_ppm.attr,
+ &dev_attr_pll_enable.attr,
NULL,
};
static struct attribute_group hdmi_tx_fs_attrs_group = {
@@ -1577,12 +1634,14 @@ static void hdmi_tx_hdcp_cb_work(struct work_struct *work)
rc = hdmi_tx_config_avmute(hdmi_ctrl, false);
}
- if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present)
- hdcp1_set_enc(true);
+ if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+ if (!hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(true);
+ }
break;
case HDCP_STATE_AUTH_FAIL:
if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
- if (hdmi_ctrl->auth_state)
+ if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
hdcp1_set_enc(false);
}
@@ -1610,6 +1669,15 @@ static void hdmi_tx_hdcp_cb_work(struct work_struct *work)
}
break;
+ case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+ if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+ if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(false);
+ }
+
+ hdmi_ctrl->auth_state = false;
+
+ break;
case HDCP_STATE_AUTH_ENC_NONE:
hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
@@ -1825,6 +1893,7 @@ static int hdmi_tx_init_edid(struct hdmi_tx_ctrl *hdmi_ctrl)
edid_init_data.kobj = hdmi_ctrl->kobj;
edid_init_data.ds_data = hdmi_ctrl->ds_data;
edid_init_data.max_pclk_khz = hdmi_ctrl->max_pclk_khz;
+ edid_init_data.yc420_support = true;
edid_data = hdmi_edid_init(&edid_init_data);
if (!edid_data) {
@@ -3618,6 +3687,7 @@ static int hdmi_tx_dev_init(struct hdmi_tx_ctrl *hdmi_ctrl)
hdmi_ctrl->hpd_state = false;
hdmi_ctrl->hpd_initialized = false;
hdmi_ctrl->hpd_off_pending = false;
+ hdmi_ctrl->pll_update_enable = false;
init_completion(&hdmi_ctrl->hpd_int_done);
INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
@@ -3791,6 +3861,11 @@ static int hdmi_tx_update_ppm(struct hdmi_tx_ctrl *hdmi_ctrl, s32 ppm)
return -EINVAL;
}
+ if (!hdmi_ctrl->pll_update_enable) {
+ pr_err("PLL update feature not enabled\n");
+ return -EINVAL;
+ }
+
/* get current pclk */
cur_pclk = pinfo->clk_rate;
/* get desired pclk */
@@ -3814,6 +3889,49 @@ static int hdmi_tx_update_ppm(struct hdmi_tx_ctrl *hdmi_ctrl, s32 ppm)
return rc;
}
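+/*
+ * Enable or disable runtime HDMI PLL (pixel clock) updates. When
+ * disabling, restore the original pixel clock if it was changed.
+ */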
+static int hdmi_tx_enable_pll_update(struct hdmi_tx_ctrl *hdmi_ctrl,
+ int enable)
+{
+ struct mdss_panel_info *pinfo = NULL;
+ int rc = 0;
+
+ if (!hdmi_ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ /* only allowed while the HDMI panel is on */
+ if (!hdmi_tx_is_panel_on(hdmi_ctrl)) {
+ pr_err("hdmi is not on\n");
+ return -EINVAL;
+ }
+
+ enable = !!enable;
+ if (hdmi_ctrl->pll_update_enable == enable) {
+ pr_warn("HDMI PLL update already %s\n",
+ hdmi_ctrl->pll_update_enable ? "enabled" : "disabled");
+ return -EINVAL;
+ }
+
+ pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+ if (!enable && hdmi_ctrl->actual_clk_rate != pinfo->clk_rate) {
+ if (hdmi_ctrl->actual_clk_rate) {
+ /* restore the original pixel clock when disabling */
+ pinfo->clk_rate = hdmi_ctrl->actual_clk_rate;
+ rc = hdmi_tx_update_pixel_clk(hdmi_ctrl);
+ }
+ }
+
+ hdmi_ctrl->actual_clk_rate = pinfo->clk_rate;
+ hdmi_ctrl->pll_update_enable = enable;
+
+ pr_debug("HDMI PLL update: %s\n",
+ hdmi_ctrl->pll_update_enable ? "enable" : "disable");
+
+ return rc;
+}
+
static int hdmi_tx_evt_handle_register(struct hdmi_tx_ctrl *hdmi_ctrl)
{
int rc = 0;
@@ -3986,6 +4104,7 @@ static int hdmi_tx_evt_handle_panel_off(struct hdmi_tx_ctrl *hdmi_ctrl)
}
hdmi_ctrl->timing_gen_on = false;
+ hdmi_ctrl->pll_update_enable = false;
end:
return rc;
}
@@ -4045,6 +4164,7 @@ static int hdmi_tx_pre_evt_handle_update_fps(struct hdmi_tx_ctrl *hdmi_ctrl)
static int hdmi_tx_post_evt_handle_unblank(struct hdmi_tx_ctrl *hdmi_ctrl)
{
hdmi_tx_ack_state(hdmi_ctrl, true);
+ hdmi_tx_send_audio_notification(hdmi_ctrl, true);
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
index ad02003631f6..92b9d84e9107 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -143,6 +143,9 @@ struct hdmi_tx_ctrl {
char disp_switch_name[MAX_SWITCH_NAME_SIZE];
+ u64 actual_clk_rate;
+ bool pll_update_enable;
+
/* pre/post is done in the context without tx_lock */
hdmi_tx_evt_handler pre_evt_handler[MDSS_EVENT_MAX - 1];
hdmi_tx_evt_handler evt_handler[MDSS_EVENT_MAX - 1];
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 5bc46d8c8f92..1384c39d4c6f 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -632,6 +632,12 @@ const char *msm_hdmi_mode_2string(u32 mode)
case HDMI_RES_AR_16_10:
aspect_ratio = "16/10";
break;
+ case HDMI_RES_AR_64_27:
+ aspect_ratio = "64/27";
+ break;
+ case HDMI_RES_AR_256_135:
+ aspect_ratio = "256/135";
+ break;
default:
aspect_ratio = "???";
};
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 410d36a3ac31..a77e5c4435bb 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1,7 +1,7 @@
/*
* MDSS MDP Interface (used by framebuffer core)
*
- * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
@@ -2436,6 +2436,8 @@ static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
ret = mdss_mdp_ds_addr_setup(mdata);
}
+ mutex_init(&mdata->scaler_off->scaler_lock);
+
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 54fb21a5f35d..632d73e909a3 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -5912,9 +5912,7 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
} else {
sctl_flush_bits = sctl->flush_bits;
}
- sctl->commit_in_progress = true;
}
- ctl->commit_in_progress = true;
ctl_flush_bits = ctl->flush_bits;
ATRACE_END("postproc_programming");
@@ -5928,6 +5926,9 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
MDP_COMMIT_STAGE_SETUP_DONE,
commit_cb->data);
ret = mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
+ ctl->commit_in_progress = true;
+ if (sctl)
+ sctl->commit_in_progress = true;
/*
* When wait for fence timed out, driver ignores the fences
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 747b4e3e2f81..2c92a480af6b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -406,7 +406,7 @@ static void mdss_mdp_cmd_wait4_autorefresh_pp(struct mdss_mdp_ctl *ctl)
return;
}
- if (line_out < ctl->mixer_left->roi.h) {
+ if ((line_out < ctl->mixer_left->roi.h) && (line_out)) {
reinit_completion(&ctx->autorefresh_ppdone);
/* enable ping pong done */
@@ -2510,6 +2510,7 @@ int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *mctl, int frame_cnt)
*/
ctx->autorefresh_state = MDP_AUTOREFRESH_ON_REQUESTED;
ctx->autorefresh_frame_cnt = frame_cnt;
+ mctl->mdata->serialize_wait4pp = true;
/* Cancel GATE Work Item */
if (cancel_work_sync(&ctx->gate_clk_work))
@@ -2523,8 +2524,10 @@ int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *mctl, int frame_cnt)
if (frame_cnt == 0) {
ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
ctx->autorefresh_frame_cnt = 0;
+ mctl->mdata->serialize_wait4pp = false;
} else {
ctx->autorefresh_frame_cnt = frame_cnt;
+ mctl->mdata->serialize_wait4pp = true;
}
break;
case MDP_AUTOREFRESH_ON:
@@ -2536,6 +2539,7 @@ int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *mctl, int frame_cnt)
ctx->autorefresh_state = MDP_AUTOREFRESH_OFF_REQUESTED;
} else {
ctx->autorefresh_frame_cnt = frame_cnt;
+ mctl->mdata->serialize_wait4pp = true;
}
break;
case MDP_AUTOREFRESH_OFF_REQUESTED:
@@ -2545,6 +2549,7 @@ int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *mctl, int frame_cnt)
pr_debug("cancelling autorefresh off request\n");
ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
ctx->autorefresh_frame_cnt = frame_cnt;
+ mctl->mdata->serialize_wait4pp = true;
}
break;
default:
@@ -2873,6 +2878,7 @@ static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
cfg |= BIT(20);
mdss_mdp_pingpong_write(pp_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+ ctl->mdata->serialize_wait4pp = false;
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 11c159630747..f3984201fbc5 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -6140,6 +6140,9 @@ static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
sw_sync_timeline_inc(mdp5_data->vsync_timeline, val);
mdp5_data->retire_cnt -= min(val, mdp5_data->retire_cnt);
+ pr_debug("Retire signaled! timeline val=%d remaining=%d\n",
+ mdp5_data->vsync_timeline->value,
+ mdp5_data->retire_cnt);
if (mdp5_data->retire_cnt == 0) {
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
mdp5_data->ctl->ops.remove_vsync_handler(mdp5_data->ctl,
@@ -6361,6 +6364,13 @@ void mdss_mdp_footswitch_ctrl_handler(bool on)
mdss_mdp_footswitch_ctrl(mdata, on);
}
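+/* Helper exposed via mdp5_interface to signal pending retire fences */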
+static void mdss_mdp_signal_retire_fence(struct msm_fb_data_type *mfd,
+ int retire_cnt)
+{
+ __vsync_retire_signal(mfd, retire_cnt);
+ pr_debug("Signaled (%d) pending retire fence\n", retire_cnt);
+}
+
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
{
struct device *dev = mfd->fbi->dev;
@@ -6402,6 +6412,7 @@ int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
mdp5_interface->splash_init_fnc = mdss_mdp_splash_init;
mdp5_interface->configure_panel = mdss_mdp_update_panel_info;
mdp5_interface->input_event_handler = mdss_mdp_input_event_handler;
+ mdp5_interface->signal_retire_fence = mdss_mdp_signal_retire_fence;
/*
* Register footswitch control only for primary fb pm
@@ -6631,14 +6642,18 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
if (!mdata->scaler_off)
return -EFAULT;
+ mutex_lock(&mdata->scaler_off->scaler_lock);
+
qseed3_lut_tbl = &mdata->scaler_off->lut_tbl;
if ((lut_tbl->dir_lut_size !=
DIR_LUT_IDX * DIR_LUT_COEFFS * sizeof(uint32_t)) ||
(lut_tbl->cir_lut_size !=
CIR_LUT_IDX * CIR_LUT_COEFFS * sizeof(uint32_t)) ||
(lut_tbl->sep_lut_size !=
- SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t)))
- return -EINVAL;
+ SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t))) {
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+ return -EINVAL;
+ }
if (!qseed3_lut_tbl->dir_lut) {
qseed3_lut_tbl->dir_lut = devm_kzalloc(&mdata->pdev->dev,
@@ -6646,7 +6661,7 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
GFP_KERNEL);
if (!qseed3_lut_tbl->dir_lut) {
ret = -ENOMEM;
- goto fail;
+ goto err;
}
}
@@ -6656,7 +6671,7 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
GFP_KERNEL);
if (!qseed3_lut_tbl->cir_lut) {
ret = -ENOMEM;
- goto fail;
+ goto fail_free_dir_lut;
}
}
@@ -6666,44 +6681,52 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
GFP_KERNEL);
if (!qseed3_lut_tbl->sep_lut) {
ret = -ENOMEM;
- goto fail;
+ goto fail_free_cir_lut;
}
}
/* Invalidate before updating */
qseed3_lut_tbl->valid = false;
-
if (copy_from_user(qseed3_lut_tbl->dir_lut,
(void *)(unsigned long)lut_tbl->dir_lut,
lut_tbl->dir_lut_size)) {
ret = -EINVAL;
- goto err;
+ goto fail_free_sep_lut;
}
if (copy_from_user(qseed3_lut_tbl->cir_lut,
(void *)(unsigned long)lut_tbl->cir_lut,
lut_tbl->cir_lut_size)) {
ret = -EINVAL;
- goto err;
+ goto fail_free_sep_lut;
}
if (copy_from_user(qseed3_lut_tbl->sep_lut,
(void *)(unsigned long)lut_tbl->sep_lut,
lut_tbl->sep_lut_size)) {
ret = -EINVAL;
- goto err;
+ goto fail_free_sep_lut;
}
qseed3_lut_tbl->valid = true;
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+
return ret;
-fail:
- kfree(qseed3_lut_tbl->dir_lut);
- kfree(qseed3_lut_tbl->cir_lut);
- kfree(qseed3_lut_tbl->sep_lut);
+fail_free_sep_lut:
+ devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->sep_lut);
+fail_free_cir_lut:
+ devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->cir_lut);
+fail_free_dir_lut:
+ devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->dir_lut);
err:
+ qseed3_lut_tbl->dir_lut = NULL;
+ qseed3_lut_tbl->cir_lut = NULL;
+ qseed3_lut_tbl->sep_lut = NULL;
qseed3_lut_tbl->valid = false;
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 9c2b1d42bd35..5b9798e2c24e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -453,6 +453,13 @@ static u32 igc_limited[IGC_LUT_ENTRIES] = {
#define PP_FLAGS_DIRTY_SHARP 0x200
#define PP_FLAGS_DIRTY_PA_DITHER 0x400
+#define PP_EARLY_PROGRAM_DIRTY_MASK (PP_FLAGS_DIRTY_PCC | \
+ PP_FLAGS_DIRTY_ENHIST | PP_FLAGS_DIRTY_HIST_COL)
+#define PP_DEFERRED_PROGRAM_DIRTY_MASK (PP_FLAGS_DIRTY_IGC | \
+ PP_FLAGS_DIRTY_PGC | PP_FLAGS_DIRTY_ARGC | \
+ PP_FLAGS_DIRTY_GAMUT | PP_FLAGS_DIRTY_PA | \
+ PP_FLAGS_DIRTY_DITHER | PP_FLAGS_DIRTY_PA_DITHER)
+
/* Leave space for future features */
#define PP_FLAGS_RESUME_COMMIT 0x10000000
@@ -1604,11 +1611,16 @@ int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler,
};
mdata = mdss_mdp_get_mdata();
+
+ mutex_lock(&mdata->scaler_off->scaler_lock);
+
lut_tbl = &mdata->scaler_off->lut_tbl;
if ((!lut_tbl) || (!lut_tbl->valid)) {
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
pr_err("%s:Invalid QSEED3 LUT TABLE\n", __func__);
return -EINVAL;
}
+
if ((scaler->lut_flag & SCALER_LUT_DIR_WR) ||
(scaler->lut_flag & SCALER_LUT_Y_CIR_WR) ||
(scaler->lut_flag & SCALER_LUT_UV_CIR_WR) ||
@@ -1656,6 +1668,8 @@ int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler,
}
}
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+
return 0;
}
@@ -2853,10 +2867,15 @@ int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl,
}
}
+ if (info->pp_program_mask & PP_NORMAL_PROGRAM_MASK) {
+ mdss_pp_res->pp_disp_flags[disp_num] &=
+ ~PP_EARLY_PROGRAM_DIRTY_MASK;
+ }
if (info->pp_program_mask & PP_DEFER_PROGRAM_MASK) {
/* clear dirty flag */
if (disp_num < MDSS_BLOCK_DISP_NUM) {
- mdss_pp_res->pp_disp_flags[disp_num] = 0;
+ mdss_pp_res->pp_disp_flags[disp_num] &=
+ ~PP_DEFERRED_PROGRAM_DIRTY_MASK;
if (disp_num < mdata->nad_cfgs)
mdata->ad_cfgs[disp_num].reg_sts = 0;
}
diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c
index 8802b58116fb..15fe77d05091 100644
--- a/drivers/video/fbdev/msm/msm_dba/adv7533.c
+++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c
@@ -129,6 +129,7 @@ struct adv7533 {
bool hdcp_enabled;
bool cec_enabled;
bool is_power_on;
+ bool is_vreg_on;
void *edid_data;
u8 edid_buf[EDID_SEG_SIZE];
u8 audio_spkr_data[AUDIO_DATA_SIZE];
@@ -190,7 +191,7 @@ static struct adv7533_reg_cfg adv7533_video_en[] = {
static struct adv7533_reg_cfg adv7533_video_disable[] = {
/* Timing Generator Disable */
- {I2C_ADDR_CEC_DSI, 0x27, 0x4B, 0},
+ {I2C_ADDR_CEC_DSI, 0x27, 0x0B, 0},
/* SPDIF disable */
{I2C_ADDR_MAIN, 0x0B, 0x00, 0},
/* Gate CEC Clock */
@@ -760,7 +761,7 @@ static void adv7533_notify_clients(struct msm_dba_device_info *dev,
u32 adv7533_read_edid(struct adv7533 *pdata, u32 size, char *edid_buf)
{
u32 ret = 0, read_size = size / 2;
- u8 edid_addr;
+ u8 edid_addr = 0;
int ndx;
if (!pdata || !edid_buf)
@@ -1007,7 +1008,7 @@ end:
static void *adv7533_handle_hpd_intr(struct adv7533 *pdata)
{
int ret = 0;
- u8 hpd_state;
+ u8 hpd_state = 0;
u8 connected = 0, disconnected = 0;
if (!pdata) {
@@ -1039,7 +1040,8 @@ end:
static int adv7533_enable_interrupts(struct adv7533 *pdata, int interrupts)
{
int ret = 0;
- u8 reg_val, init_reg_val;
+ u8 reg_val = 0;
+ u8 init_reg_val;
if (!pdata) {
pr_err("%s: invalid input\n", __func__);
@@ -1085,7 +1087,8 @@ end:
static int adv7533_disable_interrupts(struct adv7533 *pdata, int interrupts)
{
int ret = 0;
- u8 reg_val, init_reg_val;
+ u8 reg_val = 0;
+ u8 init_reg_val;
if (!pdata) {
pr_err("%s: invalid input\n", __func__);
@@ -1375,6 +1378,85 @@ static int adv7533_check_hpd(void *client, u32 flags)
return connected;
}
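+/* Enable or disable the regulators described in pdata->power_data */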
+static int adv7533_enable_vreg(struct adv7533 *pdata, int enable)
+{
+ int rc = 0;
+ struct dss_module_power *power_data = NULL;
+
+ if (!pdata) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ power_data = &pdata->power_data;
+ if (!power_data || !power_data->num_vreg) {
+ pr_warn("%s: Error: invalid power data\n", __func__);
+ return 0;
+ }
+
+ if (enable) {
+ rc = msm_dss_enable_vreg(power_data->vreg_config,
+ power_data->num_vreg, 1);
+ if (rc) {
+ pr_err("%s: Failed to enable vreg. Err=%d\n",
+ __func__, rc);
+ goto exit;
+ }
+ pdata->is_vreg_on = true;
+ } else {
+ rc = msm_dss_enable_vreg(power_data->vreg_config,
+ power_data->num_vreg, 0);
+ if (rc) {
+ pr_err("%s: Failed to disable vreg. Err=%d\n",
+ __func__, rc);
+ goto exit;
+ }
+ pdata->is_vreg_on = false;
+ }
+exit:
+ return rc;
+}
+
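+/* Configure or release the regulators described in pdata->power_data */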
+static int adv7533_config_vreg(struct adv7533 *pdata, int enable)
+{
+ int rc = 0;
+ struct dss_module_power *power_data = NULL;
+
+ if (!pdata) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+ power_data = &pdata->power_data;
+ if (!power_data || !power_data->num_vreg) {
+ pr_warn("%s: Error: invalid power data\n", __func__);
+ return 0;
+ }
+
+ if (enable) {
+ rc = msm_dss_config_vreg(&pdata->i2c_client->dev,
+ power_data->vreg_config,
+ power_data->num_vreg, 1);
+ if (rc) {
+ pr_err("%s: Failed to config vreg. Err=%d\n",
+ __func__, rc);
+ goto exit;
+ }
+ } else {
+ rc = msm_dss_config_vreg(&pdata->i2c_client->dev,
+ power_data->vreg_config,
+ power_data->num_vreg, 0);
+ if (rc) {
+ pr_err("%s: Failed to deconfig vreg. Err=%d\n",
+ __func__, rc);
+ goto exit;
+ }
+ }
+exit:
+ return rc;
+}
+
/* Device Operations */
static int adv7533_power_on(void *client, bool on, u32 flags)
{
@@ -1390,6 +1472,16 @@ static int adv7533_power_on(void *client, bool on, u32 flags)
mutex_lock(&pdata->ops_mutex);
if (on && !pdata->is_power_on) {
+ if (!pdata->is_vreg_on) {
+ ret = adv7533_config_vreg(pdata, 1);
+ if (!ret) {
+ adv7533_enable_vreg(pdata, 1);
+ } else {
+ pr_err("%s: Failed to config vreg\n", __func__);
+ goto end;
+ }
+ }
+
if (gpio_is_valid(pdata->switch_gpio)) {
gpio_set_value(pdata->switch_gpio,
pdata->switch_flags);
@@ -1419,6 +1511,10 @@ static int adv7533_power_on(void *client, bool on, u32 flags)
ret = 0;
adv7533_notify_clients(&pdata->dev_info,
MSM_DBA_CB_HPD_DISCONNECT);
+ if (pdata->is_vreg_on) {
+ adv7533_enable_vreg(pdata, 0);
+ adv7533_config_vreg(pdata, 0);
+ }
}
end:
mutex_unlock(&pdata->ops_mutex);
@@ -1491,86 +1587,6 @@ static void adv7533_video_setup(struct adv7533 *pdata,
adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x37, ((vbp & 0xF) << 4));
}
-static int adv7533_config_vreg(struct adv7533 *pdata, int enable)
-{
- int rc = 0;
- struct dss_module_power *power_data = NULL;
-
- if (!pdata) {
- pr_err("invalid input\n");
- rc = -EINVAL;
- goto exit;
- }
-
- power_data = &pdata->power_data;
- if (!power_data || !power_data->num_vreg) {
- pr_warn("%s: Error: invalid power data\n", __func__);
- return 0;
- }
-
- if (enable) {
- rc = msm_dss_config_vreg(&pdata->i2c_client->dev,
- power_data->vreg_config,
- power_data->num_vreg, 1);
- if (rc) {
- pr_err("%s: Failed to config vreg. Err=%d\n",
- __func__, rc);
- goto exit;
- }
- } else {
- rc = msm_dss_config_vreg(&pdata->i2c_client->dev,
- power_data->vreg_config,
- power_data->num_vreg, 0);
- if (rc) {
- pr_err("%s: Failed to deconfig vreg. Err=%d\n",
- __func__, rc);
- goto exit;
- }
- }
-exit:
- return rc;
-
-}
-
-static int adv7533_enable_vreg(struct adv7533 *pdata, int enable)
-{
- int rc = 0;
- struct dss_module_power *power_data = NULL;
-
- if (!pdata) {
- pr_err("invalid input\n");
- rc = -EINVAL;
- goto exit;
- }
-
- power_data = &pdata->power_data;
- if (!power_data || !power_data->num_vreg) {
- pr_warn("%s: Error: invalid power data\n", __func__);
- return 0;
- }
-
- if (enable) {
- rc = msm_dss_enable_vreg(power_data->vreg_config,
- power_data->num_vreg, 1);
- if (rc) {
- pr_err("%s: Failed to enable vreg. Err=%d\n",
- __func__, rc);
- goto exit;
- }
- } else {
- rc = msm_dss_enable_vreg(power_data->vreg_config,
- power_data->num_vreg, 0);
- if (rc) {
- pr_err("%s: Failed to disable vreg. Err=%d\n",
- __func__, rc);
- goto exit;
- }
- }
-exit:
- return rc;
-
-}
-
static int adv7533_video_on(void *client, bool on,
struct msm_dba_video_cfg *cfg, u32 flags)
{
@@ -1638,7 +1654,7 @@ static int adv7533_hdcp_enable(void *client, bool hdcp_on,
bool enc_on, u32 flags)
{
int ret = -EINVAL;
- u8 reg_val;
+ u8 reg_val = 0;
struct adv7533 *pdata =
adv7533_get_platform_data(client);
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 295e0dedaf1f..20f7234e809e 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -2151,17 +2151,15 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
unsigned short RefreshRateTableIndex)
{
unsigned short CRT2Index, VCLKIndex = 0, VCLKIndexGEN = 0, VCLKIndexGENCRT = 0;
- unsigned short modeflag, resinfo, tempbx;
+ unsigned short resinfo, tempbx;
const unsigned char *CHTVVCLKPtr = NULL;
if(ModeNo <= 0x13) {
- modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo;
CRT2Index = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC;
VCLKIndexGEN = (SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)) >> 2) & 0x03;
VCLKIndexGENCRT = VCLKIndexGEN;
} else {
- modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
CRT2Index = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
VCLKIndexGEN = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
@@ -7270,7 +7268,7 @@ SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift)
static void
SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex)
{
- unsigned short temp, temp1, resinfo = 0;
+ unsigned short temp, temp1;
unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
if(!(SiS_Pr->SiS_VBType & VB_SIS30xCLV)) return;
@@ -7282,10 +7280,6 @@ SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
if(!(ROMAddr[0x61] & 0x04)) return;
}
- if(ModeNo > 0x13) {
- resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- }
-
SiS_SetRegOR(SiS_Pr->SiS_Part4Port,0x3a,0x08);
temp = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x3a);
if(!(temp & 0x01)) {
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index d0a4e2f79a57..d215faacce04 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1600,6 +1600,7 @@ static int sm501fb_start(struct sm501fb_info *info,
info->fbmem = ioremap(res->start, resource_size(res));
if (info->fbmem == NULL) {
dev_err(dev, "cannot remap framebuffer\n");
+ ret = -ENXIO;
goto err_mem_res;
}
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 629bfa2d2f51..86ae1d4556fc 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -28,9 +28,7 @@
#include <linux/console.h>
#include <linux/screen_info.h>
-#ifdef CONFIG_PM
#include <linux/pm.h>
-#endif
#include "sm712.h"
@@ -1545,8 +1543,7 @@ static void smtcfb_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int smtcfb_pci_suspend(struct device *device)
+static int __maybe_unused smtcfb_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct smtcfb_info *sfb;
@@ -1569,7 +1566,7 @@ static int smtcfb_pci_suspend(struct device *device)
return 0;
}
-static int smtcfb_pci_resume(struct device *device)
+static int __maybe_unused smtcfb_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct smtcfb_info *sfb;
@@ -1610,20 +1607,13 @@ static int smtcfb_pci_resume(struct device *device)
}
static SIMPLE_DEV_PM_OPS(sm7xx_pm_ops, smtcfb_pci_suspend, smtcfb_pci_resume);
-#define SM7XX_PM_OPS (&sm7xx_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define SM7XX_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
static struct pci_driver smtcfb_driver = {
.name = "smtcfb",
.id_table = smtcfb_pci_table,
.probe = smtcfb_pci_probe,
.remove = smtcfb_pci_remove,
- .driver.pm = SM7XX_PM_OPS,
+ .driver.pm = &sm7xx_pm_ops,
};
static int __init sm712fb_init(void)
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 53326badfb61..2add8def83be 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1487,15 +1487,25 @@ static struct device_attribute fb_device_attrs[] = {
static int dlfb_select_std_channel(struct dlfb_data *dev)
{
int ret;
- u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
+ void *buf;
+ static const u8 set_def_chn[] = {
+ 0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
0x60, 0xFE, 0xC6, 0x97,
0x16, 0x3D, 0x47, 0xF2 };
+ buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
- set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
+ buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
+
+ kfree(buf);
+
return ret;
}
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 728cb6b23c42..7d8dfc7f1269 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -298,8 +298,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
*/
static int vfb_set_par(struct fb_info *info)
{
+ switch (info->var.bits_per_pixel) {
+ case 1:
+ info->fix.visual = FB_VISUAL_MONO01;
+ break;
+ case 8:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+ }
+
info->fix.line_length = get_line_length(info->var.xres_virtual,
info->var.bits_per_pixel);
+
return 0;
}
@@ -540,6 +555,8 @@ static int vfb_probe(struct platform_device *dev)
goto err2;
platform_set_drvdata(dev, info);
+ vfb_set_par(info);
+
fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
videomemorysize >> 10);
return 0;
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f9718f012aae..badee04ef496 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
}
static void viafb_remove_proc(struct viafb_shared *shared)
{
- struct proc_dir_entry *viafb_entry = shared->proc_entry,
- *iga1_entry = shared->iga1_proc_entry,
- *iga2_entry = shared->iga2_proc_entry;
+ struct proc_dir_entry *viafb_entry = shared->proc_entry;
if (!viafb_entry)
return;
- remove_proc_entry("output_devices", iga2_entry);
+ remove_proc_entry("output_devices", shared->iga2_proc_entry);
remove_proc_entry("iga2", viafb_entry);
- remove_proc_entry("output_devices", iga1_entry);
+ remove_proc_entry("output_devices", shared->iga1_proc_entry);
remove_proc_entry("iga1", viafb_entry);
remove_proc_entry("supported_output_devices", viafb_entry);
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 1cf907ecded4..111a0ab6280a 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
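+/*
+ * Payload length of an HDMI vendor infoframe: 6 bytes when 3D_Ext_Data
+ * is required, 5 bytes for a VIC or 3D structure, 4 bytes otherwise.
+ */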
+static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame)
+{
+ /* for side by side (half) we also need to provide 3D_Ext_Data */
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ return 6;
+ else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return 5;
+ else
+ return 4;
+}
+
/**
* hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
@@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
u8 *ptr = buffer;
size_t length;
- /* empty info frame */
- if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
- return -EINVAL;
-
/* only one of those can be supplied */
if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
return -EINVAL;
- /* for side by side (half) we also need to provide 3D_Ext_Data */
- if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
- frame->length = 6;
- else
- frame->length = 5;
+ frame->length = hdmi_vendor_infoframe_length(frame);
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
ptr[5] = 0x0c;
ptr[6] = 0x00;
- if (frame->vic) {
- ptr[7] = 0x1 << 5; /* video format */
- ptr[8] = frame->vic;
- } else {
+ if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
ptr[7] = 0x2 << 5; /* video format */
ptr[8] = (frame->s3d_struct & 0xf) << 4;
if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
+ } else if (frame->vic) {
+ ptr[7] = 0x1 << 5; /* video format */
+ ptr[8] = frame->vic;
+ } else {
+ ptr[7] = 0x0 << 5; /* video format */
}
hdmi_infoframe_set_checksum(buffer, length);
@@ -1165,7 +1170,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
ptr[1] != 1 ||
- (ptr[2] != 5 && ptr[2] != 6))
+ (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
return -EINVAL;
length = ptr[2];
@@ -1193,16 +1198,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
hvf->length = length;
- if (hdmi_video_format == 0x1) {
- hvf->vic = ptr[4];
- } else if (hdmi_video_format == 0x2) {
+ if (hdmi_video_format == 0x2) {
+ if (length != 5 && length != 6)
+ return -EINVAL;
hvf->s3d_struct = ptr[4] >> 4;
if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
- if (length == 6)
- hvf->s3d_ext_data = ptr[5] >> 4;
- else
+ if (length != 6)
return -EINVAL;
+ hvf->s3d_ext_data = ptr[5] >> 4;
}
+ } else if (hdmi_video_format == 0x1) {
+ if (length != 5)
+ return -EINVAL;
+ hvf->vic = ptr[4];
+ } else {
+ if (length != 4)
+ return -EINVAL;
}
return 0;
diff --git a/drivers/video/msm/ba/msm_ba.c b/drivers/video/msm/ba/msm_ba.c
index 95edb5bd48a9..d00f6169bdd9 100644
--- a/drivers/video/msm/ba/msm_ba.c
+++ b/drivers/video/msm/ba/msm_ba.c
@@ -347,6 +347,7 @@ int msm_ba_g_fmt(void *instance, struct v4l2_format *f)
} else {
f->fmt.pix.height = sd_fmt.format.height;
f->fmt.pix.width = sd_fmt.format.width;
+ f->fmt.pix.field = sd_fmt.format.field;
switch (sd_fmt.format.code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
@@ -574,6 +575,24 @@ long msm_ba_private_ioctl(void *instance, int cmd, void *arg)
}
}
break;
+ case VIDIOC_G_AVI_INFOFRAME: {
+ dprintk(BA_DBG, "VIDIOC_G_AVI_INFOFRAME\n");
+ sd = inst->sd;
+ if (!sd) {
+ dprintk(BA_ERR, "No sd registered");
+ return -EINVAL;
+ }
+ if (arg) {
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ if (rc)
+ dprintk(BA_ERR, "%s failed: %ld on cmd: 0x%x",
+ __func__, rc, cmd);
+ } else {
+ dprintk(BA_ERR, "%s: NULL argument provided", __func__);
+ rc = -EINVAL;
+ }
+ }
+ break;
case VIDIOC_G_FIELD_INFO: {
dprintk(BA_DBG, "VIDIOC_G_FIELD_INFO");
sd = inst->sd;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 84c6add93f1f..f978f258eab8 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -244,12 +244,14 @@ static void update_balloon_stats(struct virtio_balloon *vb)
all_vm_events(events);
si_meminfo(&i);
+#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 016bd9355190..aa93df5833dc 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -450,7 +450,7 @@ static bool watchdog_is_running(void)
is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
- & F71808FG_FLAG_WD_EN);
+ & BIT(F71808FG_FLAG_WD_EN));
superio_exit(watchdog.sioaddr);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 286369d4f0f5..be99112fad00 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -51,6 +51,7 @@ static char expect_release;
static unsigned long hpwdt_is_open;
static void __iomem *pci_mem_addr; /* the PCI-memory address */
+static unsigned long __iomem *hpwdt_nmistat;
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
@@ -474,6 +475,11 @@ static int hpwdt_time_left(void)
}
#ifdef CONFIG_HPWDT_NMI_DECODING
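+/* Check the NMI source status register; non-zero means the NMI is ours */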
+static int hpwdt_my_nmi(void)
+{
+ return ioread8(hpwdt_nmistat) & 0x6;
+}
+
/*
* NMI Handler
*/
@@ -485,6 +491,9 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
if (!hpwdt_nmi_decoding)
goto out;
+ if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
+ return NMI_DONE;
+
spin_lock_irqsave(&rom_lock, rom_pl);
if (!die_nmi_called && !is_icru && !is_uefi)
asminline_call(&cmn_regs, cru_rom_addr);
@@ -700,7 +709,7 @@ static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
if (smbios_proliant_ptr->misc_features & 0x01)
is_icru = 1;
- if (smbios_proliant_ptr->misc_features & 0x408)
+ if (smbios_proliant_ptr->misc_features & 0x1400)
is_uefi = 1;
}
}
@@ -840,6 +849,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
retval = -ENOMEM;
goto error_pci_iomap;
}
+ hpwdt_nmistat = pci_mem_addr + 0x6e;
hpwdt_timer_reg = pci_mem_addr + 0x70;
hpwdt_timer_con = pci_mem_addr + 0x72;
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 29ef719a6a3c..d69ab1e28d7d 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -161,15 +161,21 @@ static void imx2_wdt_timer_ping(unsigned long arg)
mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
}
-static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
- unsigned int new_timeout)
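+/* Program the hardware timeout without updating wdog->timeout */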
+static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int new_timeout)
{
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- wdog->timeout = new_timeout;
-
regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT,
WDOG_SEC_TO_COUNT(new_timeout));
+}
+
+static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int new_timeout)
+{
+ __imx2_wdt_set_timeout(wdog, new_timeout);
+
+ wdog->timeout = new_timeout;
return 0;
}
@@ -353,7 +359,11 @@ static int imx2_wdt_suspend(struct device *dev)
/* The watchdog IP block is running */
if (imx2_wdt_is_running(wdev)) {
- imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ /*
+ * Don't update wdog->timeout, we'll restore the current value
+ * during resume.
+ */
+ __imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
imx2_wdt_ping(wdog);
/* The watchdog is not active */
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 73708acce3ca..3a14948269b1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -239,7 +239,7 @@ config XEN_ACPI_HOTPLUG_CPU
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a4d749665c9f..1865bcfa869b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -378,10 +378,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1) {
- range--;
+ if (map->unmap_ops[offset+range].handle == -1)
break;
- }
range++;
}
err = __unmap_grant_pages(map, offset, range);
@@ -876,8 +874,10 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod)
+ if (use_ptemod) {
map->vma = NULL;
+ unmap_grant_pages(map, 0, map->count);
+ }
gntdev_put_map(priv, map);
return err;
}